Merge remote-tracking branch 'stable/linux-4.1.y' into rpi-4.1.y (tag: rpi-bootloader-1.20160202-1)
author popcornmix <popcornmix@gmail.com>
Mon, 1 Feb 2016 13:42:12 +0000 (13:42 +0000)
committer popcornmix <popcornmix@gmail.com>
Mon, 1 Feb 2016 13:42:12 +0000 (13:42 +0000)
136 files changed:
Makefile
arch/arm/kvm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/inject_fault.c
arch/arm64/mm/mmu.c
arch/arm64/net/bpf_jit.h
arch/arm64/net/bpf_jit_comp.c
arch/mips/net/bpf_jit.c
arch/mn10300/Kconfig
arch/powerpc/include/asm/cmpxchg.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/synch.h
arch/powerpc/include/uapi/asm/elf.h
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/powernv/opal.c
arch/sparc/net/bpf_jit_comp.c
arch/x86/include/asm/boot.h
arch/x86/include/asm/mmu_context.h
arch/x86/kernel/reboot.c
arch/x86/kernel/signal.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/mpx.c
arch/x86/mm/tlb.c
arch/x86/xen/suspend.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/connector/connector.c
drivers/hid/hid-core.c
drivers/iommu/intel-iommu.c
drivers/isdn/i4l/isdn_ppp.c
drivers/media/platform/vivid/vivid-osd.c
drivers/media/usb/airspy/airspy.c
drivers/net/bonding/bond_main.c
drivers/net/ppp/ppp_generic.c
drivers/net/slip/slhc.c
drivers/net/slip/slip.c
drivers/net/team/team.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/veth.c
drivers/net/vxlan.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/parisc/iommu-helpers.h
drivers/staging/lustre/lustre/obdecho/echo_client.c
drivers/usb/core/hub.c
drivers/usb/host/xhci.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ipaq.c
drivers/xen/gntdev.c
fs/direct-io.c
include/linux/filter.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/syscalls.h
include/linux/types.h
include/linux/usb/cdc_ncm.h
include/net/inet_ecn.h
include/net/inet_timewait_sock.h
kernel/bpf/verifier.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/hard-interface.h
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/translation-table.c
net/bridge/br_device.c
net/bridge/br_stp_if.c
net/core/dev.c
net/core/dst.c
net/core/filter.c
net/dccp/minisocks.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_output.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_yeah.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/ip6_output.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/openvswitch/datapath.c
net/phonet/af_phonet.c
net/sched/sch_generic.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/sysctl.c
net/unix/af_unix.c
net/unix/garbage.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
scripts/recordmcount.c
scripts/recordmcount.h
scripts/recordmcount.pl
sound/core/control.c
sound/core/hrtimer.c
sound/core/pcm_compat.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_compat.c
sound/core/seq/seq_queue.c
sound/core/timer.c
sound/firewire/bebob/Makefile
sound/firewire/dice/Makefile
sound/firewire/fireworks/Makefile
sound/firewire/oxfw/Makefile
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/rme96.c
sound/soc/codecs/arizona.c
sound/soc/codecs/es8328.c
sound/soc/codecs/es8328.h
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8974.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/sh/rcar/gen.c
sound/soc/soc-compress.c
sound/usb/mixer.c
sound/usb/mixer_maps.c
sound/usb/mixer_quirks.c
sound/usb/mixer_quirks.h
sound/usb/quirks.c

index 7609f1d..d398dd4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 1
-SUBLEVEL = 16
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = Series 4800
 
index 191dcfa..da09ddc 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
        __kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+       return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:       pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                        kvm_tlb_flush_vmid_ipa(kvm, addr);
 
                        /* No need to invalidate the cache for device mappings */
-                       if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                       if (!kvm_is_device_pfn(pte_pfn(old_pte)))
                                kvm_flush_dcache_pte(old_pte);
 
                        put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               if (!pte_none(*pte) &&
-                   (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+               if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
                        kvm_flush_dcache_pte(*pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
        return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-       return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:       pointer to pmd entry
index e0e2358..5fe949b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -162,19 +162,6 @@ static inline int mem_words_used(struct jit_ctx *ctx)
        return fls(ctx->seen & SEEN_MEM);
 }
 
-static inline bool is_load_to_a(u16 inst)
-{
-       switch (inst) {
-       case BPF_LD | BPF_W | BPF_LEN:
-       case BPF_LD | BPF_W | BPF_ABS:
-       case BPF_LD | BPF_H | BPF_ABS:
-       case BPF_LD | BPF_B | BPF_ABS:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static void jit_fill_hole(void *area, unsigned int size)
 {
        u32 *ptr;
@@ -186,7 +173,6 @@ static void jit_fill_hole(void *area, unsigned int size)
 static void build_prologue(struct jit_ctx *ctx)
 {
        u16 reg_set = saved_regs(ctx);
-       u16 first_inst = ctx->skf->insns[0].code;
        u16 off;
 
 #ifdef CONFIG_FRAME_POINTER
@@ -216,7 +202,7 @@ static void build_prologue(struct jit_ctx *ctx)
                emit(ARM_MOV_I(r_X, 0), ctx);
 
        /* do not leak kernel data to userspace */
-       if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
+       if (bpf_needs_clear_a(&ctx->skf->insns[0]))
                emit(ARM_MOV_I(r_A, 0), ctx);
 
        /* stack space for the BPF_MEM words */
index 17e92f0..3ca894e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
        *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-       if (vcpu_mode_is_32bit(vcpu))
-               return vcpu_reg32(vcpu, reg_num);
-
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
index d882b83..608ac6a 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
  */
 void ptrace_disable(struct task_struct *child)
 {
+       /*
+        * This would be better off in core code, but PTRACE_DETACH has
+        * grown its fair share of arch-specific worts and changing it
+        * is likely to cause regressions on obscure architectures.
+        */
+       user_disable_single_step(child);
 }
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
index 7475313..bbdb53b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -523,6 +523,10 @@ static int c_show(struct seq_file *m, void *v)
                seq_printf(m, "processor\t: %d\n", i);
 #endif
 
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          loops_per_jiffy / (500000UL/HZ),
+                          loops_per_jiffy / (5000UL/HZ) % 100);
+
                /*
                 * Dump out the common processor features in a single line.
                 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
index 53f1f8d..3574181 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         */
        local_dbg_save(flags);
 
+       /*
+        * Function graph tracer state gets incosistent when the kernel
+        * calls functions that never return (aka suspend finishers) hence
+        * disable graph tracing during their execution.
+        */
+       pause_graph_tracing();
+
        /*
         * mm context saved on the stack, it will be restored when
         * the cpu comes out of reset through the identity mapped
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                        hw_breakpoint_restore(NULL);
        }
 
+       unpause_graph_tracing();
+
        /*
         * Restore pstate flags. OS lock and mdscr have been already
         * restored, so from this point onwards, debugging is fully
index 85c5715..648112e 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
        /* Note: These now point to the banked copies */
        *vcpu_spsr(vcpu) = new_spsr_value;
-       *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+       *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
        /* Branch to exception vector */
        if (sctlr & (1 << 13))
index 5b8b664..cb34eb8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -450,6 +450,9 @@ void __init paging_init(void)
 
        empty_zero_page = virt_to_page(zero_page);
 
+       /* Ensure the zero page is visible to the page table walker */
+       dsb(ishst);
+
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
index 98a26ce..aee5637 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -35,6 +35,7 @@
        aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
                AARCH64_INSN_BRANCH_COMP_##type)
 #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
 
 /* Conditional branch (immediate) */
 #define A64_COND_BRANCH(cond, offset) \
index c047598..6217f80 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        u8 jmp_cond;
        s32 jmp_offset;
 
+#define check_imm(bits, imm) do {                              \
+       if ((((imm) > 0) && ((imm) >> (bits))) ||               \
+           (((imm) < 0) && (~(imm) >> (bits)))) {              \
+               pr_info("[%2d] imm=%d(0x%x) out of range\n",    \
+                       i, imm, imm);                           \
+               return -EINVAL;                                 \
+       }                                                       \
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
        switch (code) {
        /* dst = src */
        case BPF_ALU | BPF_MOV | BPF_X:
@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                break;
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_DIV | BPF_X:
-               emit(A64_UDIV(is64, dst, dst, src), ctx);
-               break;
        case BPF_ALU | BPF_MOD | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_X:
-               ctx->tmp_used = 1;
-               emit(A64_UDIV(is64, tmp, dst, src), ctx);
-               emit(A64_MUL(is64, tmp, tmp, src), ctx);
-               emit(A64_SUB(is64, dst, dst, tmp), ctx);
+       {
+               const u8 r0 = bpf2a64[BPF_REG_0];
+
+               /* if (src == 0) return 0 */
+               jmp_offset = 3; /* skip ahead to else path */
+               check_imm19(jmp_offset);
+               emit(A64_CBNZ(is64, src, jmp_offset), ctx);
+               emit(A64_MOVZ(1, r0, 0, 0), ctx);
+               jmp_offset = epilogue_offset(ctx);
+               check_imm26(jmp_offset);
+               emit(A64_B(jmp_offset), ctx);
+               /* else */
+               switch (BPF_OP(code)) {
+               case BPF_DIV:
+                       emit(A64_UDIV(is64, dst, dst, src), ctx);
+                       break;
+               case BPF_MOD:
+                       ctx->tmp_used = 1;
+                       emit(A64_UDIV(is64, tmp, dst, src), ctx);
+                       emit(A64_MUL(is64, tmp, tmp, src), ctx);
+                       emit(A64_SUB(is64, dst, dst, tmp), ctx);
+                       break;
+               }
                break;
+       }
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
                emit(A64_LSLV(is64, dst, dst, src), ctx);
@@ -393,17 +422,6 @@ emit_bswap_uxt:
                emit(A64_ASR(is64, dst, dst, imm), ctx);
                break;
 
-#define check_imm(bits, imm) do {                              \
-       if ((((imm) > 0) && ((imm) >> (bits))) ||               \
-           (((imm) < 0) && (~(imm) >> (bits)))) {              \
-               pr_info("[%2d] imm=%d(0x%x) out of range\n",    \
-                       i, imm, imm);                           \
-               return -EINVAL;                                 \
-       }                                                       \
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
        /* JUMP off */
        case BPF_JMP | BPF_JA:
                jmp_offset = bpf2a64_offset(i + off, i, ctx);
index e23fdf2..d6d27d5 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -556,19 +556,6 @@ static inline u16 align_sp(unsigned int num)
        return num;
 }
 
-static bool is_load_to_a(u16 inst)
-{
-       switch (inst) {
-       case BPF_LD | BPF_W | BPF_LEN:
-       case BPF_LD | BPF_W | BPF_ABS:
-       case BPF_LD | BPF_H | BPF_ABS:
-       case BPF_LD | BPF_B | BPF_ABS:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
 {
        int i = 0, real_off = 0;
@@ -686,7 +673,6 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
 
 static void build_prologue(struct jit_ctx *ctx)
 {
-       u16 first_inst = ctx->skf->insns[0].code;
        int sp_off;
 
        /* Calculate the total offset for the stack pointer */
@@ -700,7 +686,7 @@ static void build_prologue(struct jit_ctx *ctx)
                emit_jit_reg_move(r_X, r_zero, ctx);
 
        /* Do not leak kernel data to userspace */
-       if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
+       if (bpf_needs_clear_a(&ctx->skf->insns[0]))
                emit_jit_reg_move(r_A, r_zero, ctx);
 }
 
index 4434b54..78ae555 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
 config MN10300
        def_bool y
        select HAVE_OPROFILE
+       select HAVE_UID16
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
 config NUMA
        def_bool n
 
-config UID16
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
index d463c68..99897f6 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %3,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stdcx.  %3,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");
@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
        unsigned int prev;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %4,0,%2\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"
        : "=&r" (prev), "+m" (*p)
@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
        unsigned long prev;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"
        : "=&r" (prev), "+m" (*p)
index af56b5c..f4f99f0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
 #define MSR_TS_T       __MASK(MSR_TS_T_LG)     /*  Transaction Transactional */
 #define MSR_TS_MASK    (MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
 #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
 #define MSR_TM_TRANSACTIONAL(x)        (((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)    (((x) & MSR_TS_MASK) == MSR_TS_S)
 
index e682a71..c508686 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -44,7 +44,7 @@ static inline void isync(void)
        MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER     "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER     stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
 #define PPC_ATOMIC_EXIT_BARRIER         "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
index 59dad11..c2d21d1 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -295,6 +295,8 @@ do {                                                                        \
 #define R_PPC64_TLSLD          108
 #define R_PPC64_TOCSAVE                109
 
+#define R_PPC64_ENTRY          118
+
 #define R_PPC64_REL16          249
 #define R_PPC64_REL16_LO       250
 #define R_PPC64_REL16_HI       251
index 6838451..59663af 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                         */
                        break;
 
+               case R_PPC64_ENTRY:
+                       /*
+                        * Optimize ELFv2 large code model entry point if
+                        * the TOC is within 2GB range of current location.
+                        */
+                       value = my_r2(sechdrs, me) - (unsigned long)location;
+                       if (value + 0x80008000 > 0xffffffff)
+                               break;
+                       /*
+                        * Check for the large code model prolog sequence:
+                        *      ld r2, ...(r12)
+                        *      add r2, r2, r12
+                        */
+                       if ((((uint32_t *)location)[0] & ~0xfffc)
+                           != 0xe84c0000)
+                               break;
+                       if (((uint32_t *)location)[1] != 0x7c426214)
+                               break;
+                       /*
+                        * If found, replace it with:
+                        *      addis r2, r12, (.TOC.-func)@ha
+                        *      addi r2, r12, (.TOC.-func)@l
+                        */
+                       ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+                       ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+                       break;
+
                case R_PPC64_REL16_HA:
                        /* Subtract location pointer */
                        value -= (unsigned long)location;
index 0596373..c8c8275 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
                msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
        }
 
+       /*
+        * Use the current MSR TM suspended bit to track if we have
+        * checkpointed state outstanding.
+        * On signal delivery, we'd normally reclaim the checkpointed
+        * state to obtain stack pointer (see:get_tm_stackpointer()).
+        * This will then directly return to userspace without going
+        * through __switch_to(). However, if the stack frame is bad,
+        * we need to exit this thread which calls __switch_to() which
+        * will again attempt to reclaim the already saved tm state.
+        * Hence we need to check that we've not already reclaimed
+        * this state.
+        * We do this using the current MSR, rather tracking it in
+        * some specific thread_struct bit, as it has the additional
+        * benifit of checking for a potential TM bad thing exception.
+        */
+       if (!MSR_TM_SUSPENDED(mfmsr()))
+               return;
+
        tm_reclaim(thr, thr->regs->msr, cause);
 
        /* Having done the reclaim, we now have the checkpointed
index da50e0c..7356c33 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
                return 1;
 #endif /* CONFIG_SPE */
 
+       /* Get the top half of the MSR from the user context */
+       if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+               return 1;
+       msr_hi <<= 32;
+       /* If TM bits are set to the reserved value, it's an invalid context */
+       if (MSR_TM_RESV(msr_hi))
+               return 1;
+       /* Pull in the MSR TM bits from the user context */
+       regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
        /* Now, recheckpoint.  This loads up all of the checkpointed (older)
         * registers, including FP and V[S]Rs.  After recheckpointing, the
         * transactional versions should be loaded.
@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
        current->thread.tm_texasr |= TEXASR_FS;
        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&current->thread, msr);
-       /* Get the top half of the MSR */
-       if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
-               return 1;
-       /* Pull in MSR TM from user context */
-       regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
 
        /* This loads the speculative FP/VEC state, if used */
        if (msr & MSR_FP) {
index c7c24d2..164fd64 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -427,6 +427,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 
        /* get MSR separately, transfer the LE bit if doing signal return */
        err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+       /* Don't allow reserved mode. */
+       if (MSR_TM_RESV(msr))
+               return -EINVAL;
+
        /* pull in MSR TM from user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
index f1e0e55..f5b3de7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -210,6 +210,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
+       /*
+        * Check for illegal transactional state bit combination
+        * and if we find it, force the TS field to a safe state.
+        */
+       if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+               msr &= ~MSR_TS_MASK;
        vcpu->arch.shregs.msr = msr;
        kvmppc_end_cede(vcpu);
 }
index 17cea18..264c473 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -78,18 +78,9 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                PPC_LI(r_X, 0);
        }
 
-       switch (filter[0].code) {
-       case BPF_RET | BPF_K:
-       case BPF_LD | BPF_W | BPF_LEN:
-       case BPF_LD | BPF_W | BPF_ABS:
-       case BPF_LD | BPF_H | BPF_ABS:
-       case BPF_LD | BPF_B | BPF_ABS:
-               /* first instruction sets A register (or is RET 'constant') */
-               break;
-       default:
-               /* make sure we dont leak kernel information to user */
+       /* make sure we dont leak kernel information to user */
+       if (bpf_needs_clear_a(&filter[0]))
                PPC_LI(r_A, 0);
-       }
 }
 
 static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
index 2241565..b831a2e 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -358,7 +358,7 @@ static void opal_handle_message(void)
 
        /* Sanity check */
        if (type >= OPAL_MSG_TYPE_MAX) {
-               pr_warning("%s: Unknown message type: %u\n", __func__, type);
+               pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
                return;
        }
        opal_message_do_notify(type, (void *)&msg);
index 7931eee..8109e92 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -420,22 +420,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
                }
                emit_reg_move(O7, r_saved_O7);
 
-               switch (filter[0].code) {
-               case BPF_RET | BPF_K:
-               case BPF_LD | BPF_W | BPF_LEN:
-               case BPF_LD | BPF_W | BPF_ABS:
-               case BPF_LD | BPF_H | BPF_ABS:
-               case BPF_LD | BPF_B | BPF_ABS:
-                       /* The first instruction sets the A register (or is
-                        * a "RET 'constant'")
-                        */
-                       break;
-               default:
-                       /* Make sure we dont leak kernel information to the
-                        * user.
-                        */
+               /* Make sure we dont leak kernel information to the user. */
+               if (bpf_needs_clear_a(&filter[0]))
                        emit_clear(r_A); /* A = 0 */
-               }
 
                for (i = 0; i < flen; i++) {
                        unsigned int K = filter[i].k;
index 4fa687a..6b8d6e8 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -27,7 +27,7 @@
 #define BOOT_HEAP_SIZE             0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#define BOOT_HEAP_SIZE 0x8000
+#define BOOT_HEAP_SIZE 0x10000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
index 80d67dd..73e38f1 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -104,8 +104,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
                cpumask_set_cpu(cpu, mm_cpumask(next));
 
-               /* Re-load page tables */
+               /*
+                * Re-load page tables.
+                *
+                * This logic has an ordering constraint:
+                *
+                *  CPU 0: Write to a PTE for 'next'
+                *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+                *  CPU 1: set bit 1 in next's mm_cpumask
+                *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+                *
+                * We need to prevent an outcome in which CPU 1 observes
+                * the new PTE value and CPU 0 observes bit 1 clear in
+                * mm_cpumask.  (If that occurs, then the IPI will never
+                * be sent, and CPU 0's TLB will contain a stale entry.)
+                *
+                * The bad outcome can occur if either CPU's load is
+                * reordered before that CPU's store, so both CPUs must
+                * execute full barriers to prevent this from happening.
+                *
+                * Thus, switch_mm needs a full barrier between the
+                * store to mm_cpumask and any operation that could load
+                * from next->pgd.  TLB fills are special and can happen
+                * due to instruction fetches or for no reason at all,
+                * and neither LOCK nor MFENCE orders them.
+                * Fortunately, load_cr3() is serializing and gives the
+                * ordering guarantee we need.
+                *
+                */
                load_cr3(next->pgd);
+
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
                /* Stop flush ipis for the previous mm */
@@ -142,10 +170,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));
+
                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
+                        *
+                        * As above, load_cr3() is serializing and orders TLB
+                        * fills with respect to the mm_cpumask write.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
index 86db4bc..0549ae3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
                },
        },
+       {       /* Handle problems with rebooting on the iMac10,1. */
+               .callback = set_pci_reboot,
+               .ident = "Apple iMac10,1",
+               .matches = {
+                   DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                   DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
+               },
+       },
 
        /* ASRock */
        {       /* Handle problems with rebooting on ASRock Q1900DC-ITX */
index e0fd5f4..5d2e2e9 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -667,12 +667,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
        signal_setup_done(failed, ksig, stepping);
 }
 
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall     __NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall     \
-       test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+       return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+       return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+               __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
 
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -701,7 +704,7 @@ static void do_signal(struct pt_regs *regs)
                        break;
 
                case -ERESTART_RESTARTBLOCK:
-                       regs->ax = NR_restart_syscall;
+                       regs->ax = get_nr_restart_syscall(regs);
                        regs->ip -= 2;
                        break;
                }
index 454ccb0..0d039cd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1106,6 +1106,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_exception_intercept(svm, UD_VECTOR);
        set_exception_intercept(svm, MC_VECTOR);
        set_exception_intercept(svm, AC_VECTOR);
+       set_exception_intercept(svm, DB_VECTOR);
 
        set_intercept(svm, INTERCEPT_INTR);
        set_intercept(svm, INTERCEPT_NMI);
@@ -1638,20 +1639,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
+static void update_bp_intercept(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       clr_exception_intercept(svm, DB_VECTOR);
        clr_exception_intercept(svm, BP_VECTOR);
 
-       if (svm->nmi_singlestep)
-               set_exception_intercept(svm, DB_VECTOR);
-
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-               if (vcpu->guest_debug &
-                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-                       set_exception_intercept(svm, DB_VECTOR);
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        set_exception_intercept(svm, BP_VECTOR);
        } else
@@ -1757,7 +1751,6 @@ static int db_interception(struct vcpu_svm *svm)
                if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
                        svm->vmcb->save.rflags &=
                                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-               update_db_bp_intercept(&svm->vcpu);
        }
 
        if (svm->vcpu.guest_debug &
@@ -3751,7 +3744,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
         */
        svm->nmi_singlestep = true;
        svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
-       update_db_bp_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -4367,7 +4359,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,
 
-       .update_db_bp_intercept = update_db_bp_intercept,
+       .update_db_bp_intercept = update_bp_intercept,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
index 7c7bc8b..21dda13 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
 #define kvm_trace_sym_exc                                              \
        EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),  \
        EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),           \
-       EXS(MF), EXS(MC)
+       EXS(MF), EXS(AC), EXS(MC)
 
 /*
  * Tracepoint for kvm interrupt injection:
index a243854..945f9e1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3652,20 +3652,21 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                if (!is_paging(vcpu)) {
                        hw_cr4 &= ~X86_CR4_PAE;
                        hw_cr4 |= X86_CR4_PSE;
-                       /*
-                        * SMEP/SMAP is disabled if CPU is in non-paging mode
-                        * in hardware. However KVM always uses paging mode to
-                        * emulate guest non-paging mode with TDP.
-                        * To emulate this behavior, SMEP/SMAP needs to be
-                        * manually disabled when guest switches to non-paging
-                        * mode.
-                        */
-                       hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
                } else if (!(cr4 & X86_CR4_PAE)) {
                        hw_cr4 &= ~X86_CR4_PAE;
                }
        }
 
+       if (!enable_unrestricted_guest && !is_paging(vcpu))
+               /*
+                * SMEP/SMAP is disabled if CPU is in non-paging mode in
+                * hardware.  However KVM always uses paging mode without
+                * unrestricted guest.
+                * To emulate this behavior, SMEP/SMAP needs to be manually
+                * disabled when guest switches to non-paging mode.
+                */
+               hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, hw_cr4);
        return 0;
index 47a32f7..fed4c84 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -940,7 +940,7 @@ static u32 msrs_to_save[] = {
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
-       MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
+       MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
 };
 
 static unsigned num_msrs_to_save;
@@ -4117,16 +4117,17 @@ static void kvm_init_msr_list(void)
 
                /*
                 * Even MSRs that are valid in the host may not be exposed
-                * to the guests in some cases.  We could work around this
-                * in VMX with the generic MSR save/load machinery, but it
-                * is not really worthwhile since it will really only
-                * happen with nested virtualization.
+                * to the guests in some cases.
                 */
                switch (msrs_to_save[i]) {
                case MSR_IA32_BNDCFGS:
                        if (!kvm_x86_ops->mpx_supported())
                                continue;
                        break;
+               case MSR_TSC_AUX:
+                       if (!kvm_x86_ops->rdtscp_supported())
+                               continue;
+                       break;
                default:
                        break;
                }
index 4d1c11c..f738c61 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -120,19 +120,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
        switch (type) {
        case REG_TYPE_RM:
                regno = X86_MODRM_RM(insn->modrm.value);
-               if (X86_REX_B(insn->rex_prefix.value) == 1)
+               if (X86_REX_B(insn->rex_prefix.value))
                        regno += 8;
                break;
 
        case REG_TYPE_INDEX:
                regno = X86_SIB_INDEX(insn->sib.value);
-               if (X86_REX_X(insn->rex_prefix.value) == 1)
+               if (X86_REX_X(insn->rex_prefix.value))
                        regno += 8;
                break;
 
        case REG_TYPE_BASE:
                regno = X86_SIB_BASE(insn->sib.value);
-               if (X86_REX_B(insn->rex_prefix.value) == 1)
+               if (X86_REX_B(insn->rex_prefix.value))
                        regno += 8;
                break;
 
index 90b924a..061e011 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -160,7 +160,10 @@ void flush_tlb_current_task(void)
        preempt_disable();
 
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+
+       /* This is an implicit full barrier that synchronizes with switch_mm. */
        local_flush_tlb();
+
        trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -187,17 +190,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
        preempt_disable();
-       if (current->active_mm != mm)
+       if (current->active_mm != mm) {
+               /* Synchronize with switch_mm. */
+               smp_mb();
+
                goto out;
+       }
 
        if (!current->mm) {
                leave_mm(smp_processor_id());
+
+               /* Synchronize with switch_mm. */
+               smp_mb();
+
                goto out;
        }
 
        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
+       /*
+        * Both branches below are implicit full barriers (MOV to CR or
+        * INVLPG) that synchronize with switch_mm.
+        */
        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
@@ -227,10 +242,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
        preempt_disable();
 
        if (current->active_mm == mm) {
-               if (current->mm)
+               if (current->mm) {
+                       /*
+                        * Implicit full barrier (INVLPG) that synchronizes
+                        * with switch_mm.
+                        */
                        __flush_tlb_one(start);
-               else
+               } else {
                        leave_mm(smp_processor_id());
+
+                       /* Synchronize with switch_mm. */
+                       smp_mb();
+               }
        }
 
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
index 53b4c08..6d34151 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -32,7 +32,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
 {
 #ifdef CONFIG_XEN_PVHVM
        int cpu;
-       xen_hvm_init_shared_info();
+       if (!suspend_cancelled)
+           xen_hvm_init_shared_info();
        xen_callback_vector();
        xen_unplug_emulated_devices();
        if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
index 8a45e92..0522270 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -404,18 +404,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
        return rv;
 }
 
-static void start_check_enables(struct smi_info *smi_info)
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+       smi_info->last_timeout_jiffies = jiffies;
+       mod_timer(&smi_info->si_timer, new_val);
+       smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+                         unsigned int size)
+{
+       smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+       if (smi_info->thread)
+               wake_up_process(smi_info->thread);
+
+       smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info, bool start_timer)
 {
        unsigned char msg[2];
 
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-       smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       if (start_timer)
+               start_new_msg(smi_info, msg, 2);
+       else
+               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info)
+static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 {
        unsigned char msg[3];
 
@@ -424,7 +448,10 @@ static void start_clear_flags(struct smi_info *smi_info)
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;
 
-       smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       if (start_timer)
+               start_new_msg(smi_info, msg, 3);
+       else
+               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -434,10 +461,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
        smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
        smi_info->curr_msg->data_size = 2;
 
-       smi_info->handlers->start_transaction(
-               smi_info->si_sm,
-               smi_info->curr_msg->data,
-               smi_info->curr_msg->data_size);
+       start_new_msg(smi_info, smi_info->curr_msg->data,
+                     smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_MESSAGES;
 }
 
@@ -447,20 +472,11 @@ static void start_getting_events(struct smi_info *smi_info)
        smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
        smi_info->curr_msg->data_size = 2;
 
-       smi_info->handlers->start_transaction(
-               smi_info->si_sm,
-               smi_info->curr_msg->data,
-               smi_info->curr_msg->data_size);
+       start_new_msg(smi_info, smi_info->curr_msg->data,
+                     smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_EVENTS;
 }
 
-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
-{
-       smi_info->last_timeout_jiffies = jiffies;
-       mod_timer(&smi_info->si_timer, new_val);
-       smi_info->timer_running = true;
-}
-
 /*
  * When we have a situtaion where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -470,11 +486,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
 {
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
-               start_check_enables(smi_info);
+               start_check_enables(smi_info, start_timer);
                return true;
        }
        return false;
@@ -484,7 +500,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
-               start_check_enables(smi_info);
+               start_check_enables(smi_info, true);
                return true;
        }
        return false;
@@ -502,7 +518,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
        msg = ipmi_alloc_smi_msg();
        if (!msg) {
-               if (!disable_si_irq(smi_info))
+               if (!disable_si_irq(smi_info, true))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
@@ -518,7 +534,7 @@ static void handle_flags(struct smi_info *smi_info)
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-               start_clear_flags(smi_info);
+               start_clear_flags(smi_info, true);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                if (smi_info->intf)
                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -870,8 +886,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
-                       smi_info->handlers->start_transaction(
-                               smi_info->si_sm, msg, 2);
+                       start_new_msg(smi_info, msg, 2);
                        smi_info->si_state = SI_GETTING_FLAGS;
                        goto restart;
                }
@@ -901,7 +916,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                 * disable and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->irq) {
-                       start_check_enables(smi_info);
+                       start_check_enables(smi_info, true);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
@@ -1203,14 +1218,14 @@ static int smi_start_processing(void       *send_info,
 
        new_smi->intf = intf;
 
-       /* Try to claim any interrupts. */
-       if (new_smi->irq_setup)
-               new_smi->irq_setup(new_smi);
-
        /* Set up the timer that drives the interface. */
        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
+       /* Try to claim any interrupts. */
+       if (new_smi->irq_setup)
+               new_smi->irq_setup(new_smi);
+
        /*
         * Check if the user forcefully enabled the daemon.
         */
@@ -3515,7 +3530,7 @@ static int try_smi_init(struct smi_info *new_smi)
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
-       start_clear_flags(new_smi);
+       start_clear_flags(new_smi, false);
 
        /*
         * IRQ is defined to be set when non-zero.  req_events will
@@ -3817,7 +3832,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
        }
-       disable_si_irq(to_clean);
+       disable_si_irq(to_clean, false);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
index 30f5228..c19e7fc 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -178,26 +178,21 @@ static int cn_call_callback(struct sk_buff *skb)
  *
  * It checks skb, netlink header and msg sizes, and calls callback helper.
  */
-static void cn_rx_skb(struct sk_buff *__skb)
+static void cn_rx_skb(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh;
-       struct sk_buff *skb;
        int len, err;
 
-       skb = skb_get(__skb);
-
        if (skb->len >= NLMSG_HDRLEN) {
                nlh = nlmsg_hdr(skb);
                len = nlmsg_len(nlh);
 
                if (len < (int)sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
-                   len > CONNECTOR_MAX_MSG_SIZE) {
-                       kfree_skb(skb);
+                   len > CONNECTOR_MAX_MSG_SIZE)
                        return;
-               }
 
-               err = cn_call_callback(skb);
+               err = cn_call_callback(skb_get(skb));
                if (err < 0)
                        kfree_skb(skb);
        }
index 722a925..9ce9dfe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1589,7 +1589,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
                "Multi-Axis Controller"
        };
        const char *type, *bus;
-       char buf[64];
+       char buf[64] = "";
        unsigned int i;
        int len;
        int ret;
index 8b0178d..b85a861 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3928,14 +3928,17 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
        dev = pci_physfn(dev);
        for (bus = dev->bus; bus; bus = bus->parent) {
                bridge = bus->self;
-               if (!bridge || !pci_is_pcie(bridge) ||
+               /* If it's an integrated device, allow ATS */
+               if (!bridge)
+                       return 1;
+               /* Connected via non-PCIe: no ATS */
+               if (!pci_is_pcie(bridge) ||
                    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;
+               /* If we found the root port, look it up in the ATSR */
                if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
                        break;
        }
-       if (!bridge)
-               return 0;
 
        rcu_read_lock();
        list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
index c4198fa..9c1e8ad 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
        is->compflags = 0;
 
        is->reset = isdn_ppp_ccp_reset_alloc(is);
+       if (!is->reset)
+               return -ENOMEM;
 
        is->lp = NULL;
        is->mp_seqno = 0;       /* MP sequence number */
@@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
         * VJ header compression init
         */
        is->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */
+       if (IS_ERR(is->slcomp)) {
+               isdn_ppp_ccp_reset_free(is);
+               return PTR_ERR(is->slcomp);
+       }
 #endif
 #ifdef CONFIG_IPPP_FILTER
        is->pass_filter = NULL;
@@ -567,10 +573,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                        is->maxcid = val;
 #ifdef CONFIG_ISDN_PPP_VJ
                        sltmp = slhc_init(16, val);
-                       if (!sltmp) {
-                               printk(KERN_ERR "ippp, can't realloc slhc struct\n");
-                               return -ENOMEM;
-                       }
+                       if (IS_ERR(sltmp))
+                               return PTR_ERR(sltmp);
                        if (is->slcomp)
                                slhc_free(is->slcomp);
                        is->slcomp = sltmp;
index 084d346..e15eef6 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
        case FBIOGET_VBLANK: {
                struct fb_vblank vblank;
 
+               memset(&vblank, 0, sizeof(vblank));
                vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
                        FB_VBLANK_HAVE_VSYNC;
                vblank.count = 0;
index 4069234..a50750c 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -132,7 +132,7 @@ struct airspy {
        int            urbs_submitted;
 
        /* USB control message buffer */
-       #define BUF_SIZE 24
+       #define BUF_SIZE 128
        u8 buf[BUF_SIZE];
 
        /* Current configuration */
index 16d87bf..72ba774 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1194,7 +1194,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
        err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
        if (err)
                return err;
-       slave_dev->flags |= IFF_SLAVE;
        rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
        return 0;
 }
@@ -1452,6 +1451,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
+       /* set slave flag before open to prevent IPv6 addrconf */
+       slave_dev->flags |= IFF_SLAVE;
+
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
@@ -1712,6 +1714,7 @@ err_close:
        dev_close(slave_dev);
 
 err_restore_mac:
+       slave_dev->flags &= ~IFF_SLAVE;
        if (!bond->params.fail_over_mac ||
            BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* XXX TODO - fom follow mode needs to change master's
index 9d15566..cfe49a0 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -715,10 +715,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        val &= 0xffff;
                }
                vj = slhc_init(val2+1, val+1);
-               if (!vj) {
-                       netdev_err(ppp->dev,
-                                  "PPP: no memory (VJ compressor)\n");
-                       err = -ENOMEM;
+               if (IS_ERR(vj)) {
+                       err = PTR_ERR(vj);
                        break;
                }
                ppp_lock(ppp);
index 079f7ad..27ed252 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
 static unsigned char * put16(unsigned char *cp, unsigned short x);
 static unsigned short pull16(unsigned char **cpp);
 
-/* Initialize compression data structure
+/* Allocate compression data structure
  *     slots must be in range 0 to 255 (zero meaning no compression)
+ * Returns pointer to structure or ERR_PTR() on error.
  */
 struct slcompress *
 slhc_init(int rslots, int tslots)
@@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
        register struct cstate *ts;
        struct slcompress *comp;
 
+       if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
+               return ERR_PTR(-EINVAL);
+
        comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
        if (! comp)
                goto out_fail;
 
-       if ( rslots > 0  &&  rslots < 256 ) {
+       if (rslots > 0) {
                size_t rsize = rslots * sizeof(struct cstate);
                comp->rstate = kzalloc(rsize, GFP_KERNEL);
                if (! comp->rstate)
@@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
                comp->rslot_limit = rslots - 1;
        }
 
-       if ( tslots > 0  &&  tslots < 256 ) {
+       if (tslots > 0) {
                size_t tsize = tslots * sizeof(struct cstate);
                comp->tstate = kzalloc(tsize, GFP_KERNEL);
                if (! comp->tstate)
@@ -141,7 +145,7 @@ out_free2:
 out_free:
        kfree(comp);
 out_fail:
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 
index 05387b1..a17d86a 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
        if (cbuff == NULL)
                goto err_exit;
        slcomp = slhc_init(16, 16);
-       if (slcomp == NULL)
+       if (IS_ERR(slcomp))
                goto err_exit;
 #endif
        spin_lock_bh(&sl->lock);
index 6928448..2b45d01 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
        struct team *team = netdev_priv(dev);
        struct team_port *port;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(port, &team->port_list, list)
+       mutex_lock(&team->lock);
+       list_for_each_entry(port, &team->port_list, list)
                vlan_vid_del(port->dev, proto, vid);
-       rcu_read_unlock();
+       mutex_unlock(&team->lock);
 
        return 0;
 }
index e4b7a47..5efaa9a 100644 (file)
@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
        .ndo_stop             = usbnet_stop,
        .ndo_start_xmit       = usbnet_start_xmit,
        .ndo_tx_timeout       = usbnet_tx_timeout,
-       .ndo_change_mtu       = usbnet_change_mtu,
+       .ndo_change_mtu       = cdc_ncm_change_mtu,
        .ndo_set_mac_address  = eth_mac_addr,
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_vlan_rx_add_vid  = cdc_mbim_rx_add_vid,
index 8067b8f..0b481c3 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/ctype.h>
+#include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/workqueue.h>
 #include <linux/mii.h>
@@ -687,6 +688,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
        kfree(ctx);
 }
 
+/* we need to override the usbnet change_mtu ndo for two reasons:
+ *  - respect the negotiated maximum datagram size
+ *  - avoid unwanted changes to rx and tx buffers
+ */
+int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
+
+       if (new_mtu <= 0 || new_mtu > maxmtu)
+               return -EINVAL;
+       net->mtu = new_mtu;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
+
+static const struct net_device_ops cdc_ncm_netdev_ops = {
+       .ndo_open            = usbnet_open,
+       .ndo_stop            = usbnet_stop,
+       .ndo_start_xmit      = usbnet_start_xmit,
+       .ndo_tx_timeout      = usbnet_tx_timeout,
+       .ndo_change_mtu      = cdc_ncm_change_mtu,
+       .ndo_set_mac_address = eth_mac_addr,
+       .ndo_validate_addr   = eth_validate_addr,
+};
+
 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
 {
        const struct usb_cdc_union_desc *union_desc = NULL;
@@ -861,6 +889,9 @@ advance:
        /* add our sysfs attrs */
        dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
 
+       /* must handle MTU changes */
+       dev->net->netdev_ops = &cdc_ncm_netdev_ops;
+
        return 0;
 
 error2:
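
The cdc_ncm_change_mtu() addition above clamps MTU requests to the datagram size negotiated with the NCM device rather than the generic usbnet limit. A standalone sketch of just the bound check follows; ETH_HLEN stands in for cdc_ncm_eth_hlen(dev) and the 2048-byte datagram size is a made-up example, not a value from any particular device.

#include <errno.h>
#include <stdio.h>

#define ETH_HLEN 14	/* per-datagram Ethernet header overhead */

/* Validate a requested MTU the way cdc_ncm_change_mtu() does:
 * it must be positive and must fit inside one negotiated datagram.
 */
static int ncm_validate_mtu(int new_mtu, unsigned int max_datagram_size)
{
	int maxmtu = (int)max_datagram_size - ETH_HLEN;

	if (new_mtu <= 0 || new_mtu > maxmtu)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", ncm_validate_mtu(1500, 2048));	/* 0: accepted */
	printf("%d\n", ncm_validate_mtu(9000, 2048));	/* -22: rejected */
	return 0;
}
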
index c8186ff..2e61a79 100644 (file)
@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
                kfree_skb(skb);
                goto drop;
        }
-       /* don't change ip_summed == CHECKSUM_PARTIAL, as that
-        * will cause bad checksum on forwarded packets
-        */
-       if (skb->ip_summed == CHECKSUM_NONE &&
-           rcv->features & NETIF_F_RXCSUM)
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
        if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
                struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
index 0085b8d..940f78e 100644 (file)
@@ -2581,7 +2581,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
        struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
        struct vxlan_rdst *dst = &vxlan->default_dst;
        __u32 vni;
        int err;
@@ -2714,9 +2714,13 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
                vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
-       if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
-                          vxlan->dst_port, vxlan->flags)) {
-               pr_info("duplicate VNI %u\n", vni);
+       list_for_each_entry(tmp, &vn->vxlan_list, next) {
+               if (tmp->default_dst.remote_vni == vni &&
+                   (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
+                    tmp->saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
+                   tmp->dst_port == vxlan->dst_port &&
+                   (tmp->flags & VXLAN_F_RCV_FLAGS) ==
+                   (vxlan->flags & VXLAN_F_RCV_FLAGS))
                return -EEXIST;
        }
 
index 0866c5d..5e5b618 100644 (file)
@@ -2007,8 +2007,11 @@ static int __init netback_init(void)
        if (!xen_domain())
                return -ENODEV;
 
-       /* Allow as many queues as there are CPUs, by default */
-       xenvif_max_queues = num_online_cpus();
+       /* Allow as many queues as there are CPUs if user has not
+        * specified a value.
+        */
+       if (xenvif_max_queues == 0)
+               xenvif_max_queues = num_online_cpus();
 
        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
                pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
index 52f081f..fd51626 100644 (file)
@@ -1710,19 +1710,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
 }
 
 static int xennet_create_queues(struct netfront_info *info,
-                               unsigned int num_queues)
+                               unsigned int *num_queues)
 {
        unsigned int i;
        int ret;
 
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+       info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
                               GFP_KERNEL);
        if (!info->queues)
                return -ENOMEM;
 
        rtnl_lock();
 
-       for (i = 0; i < num_queues; i++) {
+       for (i = 0; i < *num_queues; i++) {
                struct netfront_queue *queue = &info->queues[i];
 
                queue->id = i;
@@ -1732,7 +1732,7 @@ static int xennet_create_queues(struct netfront_info *info,
                if (ret < 0) {
                        dev_warn(&info->netdev->dev,
                                 "only created %d queues\n", i);
-                       num_queues = i;
+                       *num_queues = i;
                        break;
                }
 
@@ -1742,11 +1742,11 @@ static int xennet_create_queues(struct netfront_info *info,
                        napi_enable(&queue->napi);
        }
 
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
+       netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
        rtnl_unlock();
 
-       if (num_queues == 0) {
+       if (*num_queues == 0) {
                dev_err(&info->netdev->dev, "no queues\n");
                return -EINVAL;
        }
@@ -1792,7 +1792,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        if (info->queues)
                xennet_destroy_queues(info);
 
-       err = xennet_create_queues(info, num_queues);
+       err = xennet_create_queues(info, &num_queues);
        if (err < 0)
                goto destroy_ring;
 
@@ -2140,8 +2140,11 @@ static int __init netif_init(void)
 
        pr_info("Initialising Xen virtual ethernet driver\n");
 
-       /* Allow as many queues as there are CPUs, by default */
-       xennet_max_queues = num_online_cpus();
+       /* Allow as many queues as there are CPUs if user has not
+        * specified a value.
+        */
+       if (xennet_max_queues == 0)
+               xennet_max_queues = num_online_cpus();
 
        return xenbus_register_frontend(&netfront_driver);
 }
index 761e77b..e56f156 100644 (file)
@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
        struct scatterlist *contig_sg;     /* contig chunk head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        unsigned int n_mappings = 0;
-       unsigned int max_seg_size = dma_get_max_seg_size(dev);
+       unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
+                                       (unsigned)DMA_CHUNK_SIZE);
+       unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
+       if (max_seg_boundary)   /* check if the addition above didn't overflow */
+               max_seg_size = min(max_seg_size, max_seg_boundary);
 
        while (nents > 0) {
 
@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
 
                        /*
                        ** First make sure current dma stream won't
-                       ** exceed DMA_CHUNK_SIZE if we coalesce the
+                       ** exceed max_seg_size if we coalesce the
                        ** next entry.
                        */   
-                       if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
-                                           IOVP_SIZE) > DMA_CHUNK_SIZE))
-                               break;
-
-                       if (startsg->length + dma_len > max_seg_size)
+                       if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
+                                    max_seg_size))
                                break;
 
                        /*
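
Background for the overflow check above: dma_get_seg_boundary() returns an inclusive mask, commonly 0xffffffff when the device has no boundary restriction, so adding 1 can wrap to zero; the boundary is therefore only folded into max_seg_size when the addition did not overflow. A small sketch of that computation, with DMA_CHUNK_SIZE_DEMO standing in for the parisc DMA_CHUNK_SIZE constant (illustrative value only):

#include <stdio.h>

#define DMA_CHUNK_SIZE_DEMO (2048u * 1024u)	/* illustrative, not the real constant */

/* Start from the smaller of the device segment limit and the chunk
 * size, then fold in the segment boundary unless "mask + 1" wrapped
 * around to 0.
 */
static unsigned int coalesce_limit(unsigned int dev_max_seg,
				   unsigned int seg_boundary_mask)
{
	unsigned int max_seg_size = dev_max_seg < DMA_CHUNK_SIZE_DEMO ?
				    dev_max_seg : DMA_CHUNK_SIZE_DEMO;
	unsigned int max_seg_boundary = seg_boundary_mask + 1;

	if (max_seg_boundary && max_seg_boundary < max_seg_size)
		max_seg_size = max_seg_boundary;
	return max_seg_size;
}

int main(void)
{
	printf("%u\n", coalesce_limit(65536, 0xffffffffu));	/* 65536: no-boundary mask ignored */
	printf("%u\n", coalesce_limit(65536, 0x00000fffu));	/* 4096: boundary wins */
	return 0;
}
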
index d542e06..10e520d 100644 (file)
@@ -1268,6 +1268,7 @@ static int
 echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
 {
        struct lov_stripe_md *ulsm = _ulsm;
+       struct lov_oinfo **p;
        int nob, i;
 
        nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
@@ -1277,9 +1278,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
        if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
                return -EFAULT;
 
-       for (i = 0; i < lsm->lsm_stripe_count; i++) {
-               if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
-                                     sizeof(lsm->lsm_oinfo[0])))
+       for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
+               struct lov_oinfo __user *up;
+               if (get_user(up, ulsm->lsm_oinfo + i) ||
+                   copy_to_user(up, *p, sizeof(struct lov_oinfo)))
                        return -EFAULT;
        }
        return 0;
@@ -1287,9 +1289,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
 
 static int
 echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
-                void *ulsm, int ulsm_nob)
+               struct lov_stripe_md __user *ulsm, int ulsm_nob)
 {
        struct echo_client_obd *ec = ed->ed_ec;
+       struct lov_oinfo **p;
        int                  i;
 
        if (ulsm_nob < sizeof(*lsm))
@@ -1305,11 +1308,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
                return -EINVAL;
 
 
-       for (i = 0; i < lsm->lsm_stripe_count; i++) {
-               if (copy_from_user(lsm->lsm_oinfo[i],
-                                      ((struct lov_stripe_md *)ulsm)-> \
-                                      lsm_oinfo[i],
-                                      sizeof(lsm->lsm_oinfo[0])))
+       for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
+               struct lov_oinfo __user *up;
+               if (get_user(up, ulsm->lsm_oinfo + i) ||
+                   copy_from_user(*p, up, sizeof(struct lov_oinfo)))
                        return -EFAULT;
        }
        return 0;
index acf746b..f2f453c 100644 (file)
@@ -1034,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
        unsigned delay;
 
        /* Continue a partial initialization */
-       if (type == HUB_INIT2)
-               goto init2;
-       if (type == HUB_INIT3)
+       if (type == HUB_INIT2 || type == HUB_INIT3) {
+               device_lock(hub->intfdev);
+
+               /* Was the hub disconnected while we were waiting? */
+               if (hub->disconnected) {
+                       device_unlock(hub->intfdev);
+                       kref_put(&hub->kref, hub_release);
+                       return;
+               }
+               if (type == HUB_INIT2)
+                       goto init2;
                goto init3;
+       }
+       kref_get(&hub->kref);
 
        /* The superspeed hub except for root hub has to use Hub Depth
         * value as an offset into the route string to locate the bits
@@ -1235,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                        queue_delayed_work(system_power_efficient_wq,
                                        &hub->init_work,
                                        msecs_to_jiffies(delay));
+                       device_unlock(hub->intfdev);
                        return;         /* Continues at init3: below */
                } else {
                        msleep(delay);
@@ -1256,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
        /* Allow autosuspend if it was suppressed */
        if (type <= HUB_INIT3)
                usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+       if (type == HUB_INIT2 || type == HUB_INIT3)
+               device_unlock(hub->intfdev);
+
+       kref_put(&hub->kref, hub_release);
 }
 
 /* Implement the continuations for the delays above */
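
The hub_activate() rework above takes a kref on the hub before the HUB_INIT2/HUB_INIT3 stages can be queued as delayed work, re-checks hub->disconnected under the interface lock when the continuation runs, and drops the reference when the pass finishes, so a disconnect arriving between the stages cannot free the hub underneath them. Below is a deliberately tiny, single-threaded model of that rule (the real race needs a second CPU, which is only described in the comments); demo_hub and its helpers are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

struct demo_hub {
	int refcount;
	int disconnected;
};

static void demo_hub_put(struct demo_hub *hub)
{
	if (--hub->refcount == 0) {
		printf("hub freed\n");
		free(hub);
	}
}

/* The deferred stage: it may only touch the hub because whoever
 * scheduled the work already held a reference on its behalf, and it
 * must drop that reference on every exit path.
 */
static void demo_init_continuation(struct demo_hub *hub)
{
	if (hub->disconnected) {		/* re-check before touching state */
		demo_hub_put(hub);
		return;
	}
	printf("continuing init, refcount=%d\n", hub->refcount);
	demo_hub_put(hub);
}

int main(void)
{
	struct demo_hub *hub = calloc(1, sizeof(*hub));

	if (!hub)
		return 1;
	hub->refcount = 1;		/* creator's reference */
	hub->refcount++;		/* reference held for the deferred stage */

	hub->disconnected = 1;		/* disconnect races with the delay */
	demo_init_continuation(hub);	/* drops only the work reference */

	demo_hub_put(hub);		/* creator's put is the one that frees */
	return 0;
}
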
index 1e6d757..f6bb118 100644 (file)
@@ -4794,8 +4794,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
        slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
        slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+       /*
+        * refer to section 6.2.2: MTT should be 0 for full speed hub,
+        * but it may be already set to 1 when setup an xHCI virtual
+        * device, so clear it anyway.
+        */
        if (tt->multi)
                slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+       else if (hdev->speed == USB_SPEED_FULL)
+               slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
+
        if (xhci->hci_version > 0x95) {
                xhci_dbg(xhci, "xHCI version %x needs hub "
                                "TT think time and number of ports\n",
@@ -5046,6 +5054,10 @@ static int __init xhci_hcd_init(void)
        BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
        /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+
+       if (usb_disabled())
+               return -ENODEV;
+
        return 0;
 }
 
index 7d4f51a..59b2126 100644 (file)
@@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+       { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
        { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
        { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
index f51a5d5..ec1b8f2 100644 (file)
@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
         * through. Since this has a reasonably high failure rate, we retry
         * several times.
         */
-       while (retries--) {
+       while (retries) {
+               retries--;
                result = usb_control_msg(serial->dev,
                                usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
                                0x1, 0, NULL, 0, 100);
index 4bd23bb..ee71bad 100644 (file)
@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
        vma->vm_ops = &gntdev_vmops;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 
        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;
index 745d234..d83a021 100644 (file)
@@ -1159,6 +1159,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                }
        }
 
+       /* Once we sampled i_size check for reads beyond EOF */
+       dio->i_size = i_size_read(inode);
+       if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
+               if (dio->flags & DIO_LOCKING)
+                       mutex_unlock(&inode->i_mutex);
+               kmem_cache_free(dio_cache, dio);
+               retval = 0;
+               goto out;
+       }
+
        /*
         * For file extending writes updating i_size before data writeouts
         * complete can expose uninitialized blocks in dumb filesystems.
@@ -1212,7 +1222,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
        sdio.next_block_for_io = -1;
 
        dio->iocb = iocb;
-       dio->i_size = i_size_read(inode);
 
        spin_lock_init(&dio->bio_lock);
        dio->refcount = 1;
index fa11b3a..1ce6e10 100644 (file)
@@ -428,6 +428,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp)
 
 #define BPF_ANC                BIT(15)
 
+static inline bool bpf_needs_clear_a(const struct sock_filter *first)
+{
+       switch (first->code) {
+       case BPF_RET | BPF_K:
+       case BPF_LD | BPF_W | BPF_LEN:
+               return false;
+
+       case BPF_LD | BPF_W | BPF_ABS:
+       case BPF_LD | BPF_H | BPF_ABS:
+       case BPF_LD | BPF_B | BPF_ABS:
+               if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
+                       return true;
+               return false;
+
+       default:
+               return true;
+       }
+}
+
 static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
 {
        BUG_ON(ftest->code & BPF_ANC);
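
The bpf_needs_clear_a() helper added above gives the classic-BPF JITs one shared answer to "does the prologue have to zero the A register?": no when the first instruction overwrites A anyway (RET K, LD LEN, ordinary absolute loads), yes for the SKF_AD_ALU_XOR_X pseudo-load, which reads A, and yes in the general case. The sketch below reproduces the same logic in user space so it can be run directly; it assumes a Linux system where <linux/filter.h> provides struct sock_filter and the SKF_AD_* constants.

#include <stdbool.h>
#include <stdio.h>
#include <linux/filter.h>	/* struct sock_filter, BPF_*, SKF_AD_* */

/* Same decision as bpf_needs_clear_a(), reproduced for the demo. */
static bool needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		return first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X;
	default:
		return true;
	}
}

int main(void)
{
	struct sock_filter ld_len = { BPF_LD | BPF_W | BPF_LEN, 0, 0, 0 };
	struct sock_filter xor_x  = { BPF_LD | BPF_W | BPF_ABS, 0, 0,
				      (__u32)(SKF_AD_OFF + SKF_AD_ALU_XOR_X) };

	/* A JIT prologue would emit "A = 0" only when this returns true. */
	printf("LD LEN first:    clear A? %d\n", needs_clear_a(&ld_len));
	printf("ALU_XOR_X first: clear A? %d\n", needs_clear_a(&xor_x));
	return 0;
}
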
index 61f4f2d..9128b4e 100644 (file)
@@ -802,6 +802,7 @@ struct user_struct {
        unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
 #endif
        unsigned long locked_shm; /* How many pages of mlocked shm ? */
+       unsigned long unix_inflight;    /* How many files in flight in unix sockets */
 
 #ifdef CONFIG_KEYS
        struct key *uid_keyring;        /* UID specific keyring */
index 4307e20..1f17abe 100644 (file)
@@ -3320,7 +3320,8 @@ struct skb_gso_cb {
        int     encap_level;
        __u16   csum_start;
 };
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
+#define SKB_SGO_CB_OFFSET      32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
 
 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
 {
index 76d1e38..0c53fd5 100644 (file)
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
 asmlinkage long sys_lchown(const char __user *filename,
                                uid_t user, gid_t group);
 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 asmlinkage long sys_chown16(const char __user *filename,
                                old_uid_t user, old_gid_t group);
 asmlinkage long sys_lchown16(const char __user *filename,
index 8715287..69c44d9 100644 (file)
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;
 
 typedef unsigned long          uintptr_t;
 
-#ifdef CONFIG_UID16
+#ifdef CONFIG_HAVE_UID16
 /* This is defined by include/asm-{arch}/posix_types.h */
 typedef __kernel_old_uid_t     old_uid_t;
 typedef __kernel_old_gid_t     old_gid_t;
index 7c9b484..e7827ae 100644 (file)
@@ -133,6 +133,7 @@ struct cdc_ncm_ctx {
 };
 
 u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
+int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
index 84b2083..0dc0a51 100644 (file)
@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
 
 struct ipv6hdr;
 
-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
+/* Note:
+ * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
+ * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
+ * In IPv6 case, no checksum compensates the change in IPv6 header,
+ * so we have to update skb->csum.
+ */
+static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
 {
+       __be32 from, to;
+
        if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
                return 0;
-       *(__be32*)iph |= htonl(INET_ECN_CE << 20);
+
+       from = *(__be32 *)iph;
+       to = from | htonl(INET_ECN_CE << 20);
+       *(__be32 *)iph = to;
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_add(csum_sub(skb->csum, from), to);
        return 1;
 }
 
@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
        case cpu_to_be16(ETH_P_IPV6):
                if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
                    skb_tail_pointer(skb))
-                       return IP6_ECN_set_ce(ipv6_hdr(skb));
+                       return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
                break;
        }
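
The comment block above captures the asymmetry behind this change: IPv4 carries its own header checksum, which IP_ECN_set_ce() adjusts, so skb->csum is untouched, while flipping the ECN bits of an IPv6 header changes bytes that a CHECKSUM_COMPLETE skb->csum already summed over, hence the csum_add(csum_sub(skb->csum, from), to) compensation. A user-space sketch of the arithmetic (plain 16-bit one's-complement folding, not the kernel's csum_add/csum_sub implementations) shows the incremental update landing on the same value as a full recomputation:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Model of CHECKSUM_COMPLETE: the sum over all packet data. */
static uint32_t sum_words(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	return sum;
}

int main(void)
{
	/* The first 32-bit word stands in for the start of an IPv6
	 * header (version 6, ECN bits clear); the rest is arbitrary
	 * payload.
	 */
	uint16_t pkt[8] = { 0x6000, 0x0000, 0x1234, 0xabcd,
			    0x0007, 0xffee, 0x1111, 0x2222 };
	uint32_t sum = sum_words(pkt, 8);
	uint16_t from_hi = pkt[0], from_lo = pkt[1];

	pkt[0] |= 0x0030;	/* set CE: INET_ECN_CE << 20 in the header word */

	/* Incremental update: subtract the old word, add the new one,
	 * in one's complement (subtracting = adding the complement).
	 */
	sum += (uint16_t)~from_hi;
	sum += (uint16_t)~from_lo;
	sum += pkt[0];
	sum += pkt[1];

	printf("incremental: %04x\n", fold(sum));		/* 515b */
	printf("recomputed : %04x\n", fold(sum_words(pkt, 8)));	/* 515b */
	return 0;
}
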
 
index 360c480..7682cb2 100644 (file)
@@ -112,7 +112,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
+                         bool rearm);
+
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+{
+       __inet_twsk_schedule(tw, timeo, false);
+}
+
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+{
+       __inet_twsk_schedule(tw, timeo, true);
+}
+
 void inet_twsk_deschedule(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
index 47dcd3a..141d562 100644 (file)
@@ -1019,6 +1019,16 @@ static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
                        return -EINVAL;
                }
 
+               if ((opcode == BPF_LSH || opcode == BPF_RSH ||
+                    opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
+                       int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
+
+                       if (insn->imm < 0 || insn->imm >= size) {
+                               verbose("invalid shift %d\n", insn->imm);
+                               return -EINVAL;
+                       }
+               }
+
                /* pattern match 'bpf_add Rx, imm' instruction */
                if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
                    regs[insn->dst_reg].type == FRAME_PTR &&
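
The verifier check above rejects, at load time, any BPF_K shift whose immediate is negative or at least the operand width (32 for BPF_ALU, 64 for BPF_ALU64); shifting by the full width or more is undefined in C and produces different results on different JIT targets. The classic-BPF checker gets the matching BPF_ALU|BPF_LSH/RSH|BPF_K test in the net/core/filter.c hunk further down. The bound itself, separated from the verifier plumbing, is just:

#include <stdbool.h>
#include <stdio.h>

/* Valid immediate shift amounts: 0 .. size-1, where size is the
 * register width selected by the instruction class.
 */
static bool shift_imm_ok(int imm, bool alu64)
{
	int size = alu64 ? 64 : 32;

	return imm >= 0 && imm < size;
}

int main(void)
{
	printf("%d\n", shift_imm_ok(31, false));	/* 1: fine for 32-bit ALU */
	printf("%d\n", shift_imm_ok(32, false));	/* 0: now rejected */
	printf("%d\n", shift_imm_ok(63, true));		/* 1: fine for 64-bit ALU */
	return 0;
}
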
index ac4b96e..bd3357e 100644 (file)
@@ -112,21 +112,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /* finally deinitialize the claim */
-static void batadv_claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_release(struct batadv_bla_claim *claim)
 {
-       struct batadv_bla_claim *claim;
-
-       claim = container_of(rcu, struct batadv_bla_claim, rcu);
-
        batadv_backbone_gw_free_ref(claim->backbone_gw);
-       kfree(claim);
+       kfree_rcu(claim, rcu);
 }
 
 /* free a claim, call claim_free_rcu if its the last reference */
 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
        if (atomic_dec_and_test(&claim->refcount))
-               call_rcu(&claim->rcu, batadv_claim_free_rcu);
+               batadv_claim_release(claim);
 }
 
 /**
index 1918cd5..b6bff9c 100644 (file)
@@ -64,18 +64,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
                call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
 }
 
-/**
- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
- *  possibly free it (without rcu callback)
- * @hard_iface: the hard interface to free
- */
-static inline void
-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
-{
-       if (atomic_dec_and_test(&hard_iface->refcount))
-               batadv_hardif_free_rcu(&hard_iface->rcu);
-}
-
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
index a449195..2fbd3a6 100644 (file)
@@ -175,28 +175,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
- *  its refcount on the orig_node
- * @rcu: rcu pointer of the nc node
+ * batadv_nc_node_release - release nc_node from lists and queue for free after
+ *  rcu grace period
+ * @nc_node: the nc node to free
  */
-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
+static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
 {
-       struct batadv_nc_node *nc_node;
-
-       nc_node = container_of(rcu, struct batadv_nc_node, rcu);
        batadv_orig_node_free_ref(nc_node->orig_node);
-       kfree(nc_node);
+       kfree_rcu(nc_node, rcu);
 }
 
 /**
- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
- * frees it
+ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
+ *  release it
  * @nc_node: the nc node to free
  */
 static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
 {
        if (atomic_dec_and_test(&nc_node->refcount))
-               call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
+               batadv_nc_node_release(nc_node);
 }
 
 /**
index dfae974..77ea1d4 100644 (file)
@@ -150,86 +150,58 @@ err:
 }
 
 /**
- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
- * @rcu: rcu pointer of the neigh_ifinfo object
- */
-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
-{
-       struct batadv_neigh_ifinfo *neigh_ifinfo;
-
-       neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
-
-       if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-               batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
-
-       kfree(neigh_ifinfo);
-}
-
-/**
- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
- *  the neigh_ifinfo (without rcu callback)
+ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ *  free after rcu grace period
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
 static void
-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
+batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
-       if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-               batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
+       if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+               batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
+
+       kfree_rcu(neigh_ifinfo, rcu);
 }
 
 /**
- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
+ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
  *  the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-               call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
+               batadv_neigh_ifinfo_release(neigh_ifinfo);
 }
 
 /**
- * batadv_neigh_node_free_rcu - free the neigh_node
- * @rcu: rcu pointer of the neigh_node
+ * batadv_neigh_node_release - release neigh_node from lists and queue for
+ *  free after rcu grace period
+ * @neigh_node: neigh neighbor to free
  */
-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
+static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
 {
        struct hlist_node *node_tmp;
-       struct batadv_neigh_node *neigh_node;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
 
-       neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
-
        hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
                                  &neigh_node->ifinfo_list, list) {
-               batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
+               batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
        }
-       batadv_hardif_free_ref_now(neigh_node->if_incoming);
+       batadv_hardif_free_ref(neigh_node->if_incoming);
 
-       kfree(neigh_node);
-}
-
-/**
- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
- *  and possibly free it (without rcu callback)
- * @neigh_node: neigh neighbor to free
- */
-static void
-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
-{
-       if (atomic_dec_and_test(&neigh_node->refcount))
-               batadv_neigh_node_free_rcu(&neigh_node->rcu);
+       kfree_rcu(neigh_node, rcu);
 }
 
 /**
  * batadv_neigh_node_free_ref - decrement the neighbors refcounter
- *  and possibly free it
+ *  and possibly release it
  * @neigh_node: neigh neighbor to free
  */
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
 {
        if (atomic_dec_and_test(&neigh_node->refcount))
-               call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
+               batadv_neigh_node_release(neigh_node);
 }
 
 /**
@@ -495,108 +467,99 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
- * @rcu: rcu pointer of the orig_ifinfo object
+ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ *  free after rcu grace period
+ * @orig_ifinfo: the orig_ifinfo object to release
  */
-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
 {
-       struct batadv_orig_ifinfo *orig_ifinfo;
        struct batadv_neigh_node *router;
 
-       orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
-
        if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-               batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
+               batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
 
        /* this is the last reference to this object */
        router = rcu_dereference_protected(orig_ifinfo->router, true);
        if (router)
-               batadv_neigh_node_free_ref_now(router);
-       kfree(orig_ifinfo);
+               batadv_neigh_node_free_ref(router);
+
+       kfree_rcu(orig_ifinfo, rcu);
 }
 
 /**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- *  the orig_ifinfo (without rcu callback)
+ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
+ *  the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
-static void
-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
+void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
 {
        if (atomic_dec_and_test(&orig_ifinfo->refcount))
-               batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
+               batadv_orig_ifinfo_release(orig_ifinfo);
 }
 
 /**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- *  the orig_ifinfo
- * @orig_ifinfo: the orig_ifinfo object to release
+ * batadv_orig_node_free_rcu - free the orig_node
+ * @rcu: rcu pointer of the orig_node
  */
-void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
+static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 {
-       if (atomic_dec_and_test(&orig_ifinfo->refcount))
-               call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
+       struct batadv_orig_node *orig_node;
+
+       orig_node = container_of(rcu, struct batadv_orig_node, rcu);
+
+       batadv_mcast_purge_orig(orig_node);
+
+       batadv_frag_purge_orig(orig_node, NULL);
+
+       if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+               orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
+       kfree(orig_node->tt_buff);
+       kfree(orig_node);
 }
 
-static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
+/**
+ * batadv_orig_node_release - release orig_node from lists and queue for
+ *  free after rcu grace period
+ * @orig_node: the orig node to free
+ */
+static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
 {
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
-       struct batadv_orig_node *orig_node;
        struct batadv_orig_ifinfo *orig_ifinfo;
 
-       orig_node = container_of(rcu, struct batadv_orig_node, rcu);
-
        spin_lock_bh(&orig_node->neigh_list_lock);
 
        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                hlist_del_rcu(&neigh_node->list);
-               batadv_neigh_node_free_ref_now(neigh_node);
+               batadv_neigh_node_free_ref(neigh_node);
        }
 
        hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
                                  &orig_node->ifinfo_list, list) {
                hlist_del_rcu(&orig_ifinfo->list);
-               batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
+               batadv_orig_ifinfo_free_ref(orig_ifinfo);
        }
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
-       batadv_mcast_purge_orig(orig_node);
-
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
-       batadv_frag_purge_orig(orig_node, NULL);
-
-       if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
-               orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
-
-       kfree(orig_node->tt_buff);
-       kfree(orig_node);
+       call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
 }
 
 /**
  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
- * schedule an rcu callback for freeing it
+ *  release it
  * @orig_node: the orig node to free
  */
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 {
        if (atomic_dec_and_test(&orig_node->refcount))
-               call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
-}
-
-/**
- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
- * possibly free it (without rcu callback)
- * @orig_node: the orig node to free
- */
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
-{
-       if (atomic_dec_and_test(&orig_node->refcount))
-               batadv_orig_node_free_rcu(&orig_node->rcu);
+               batadv_orig_node_release(orig_node);
 }
 
 void batadv_originator_free(struct batadv_priv *bat_priv)
index aa4a436..28b751a 100644 (file)
@@ -25,7 +25,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr);
 struct batadv_neigh_node *
index 4f2a9d2..ddd62c9 100644 (file)
@@ -219,20 +219,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
        return count;
 }
 
-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
-{
-       struct batadv_tt_orig_list_entry *orig_entry;
-
-       orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-
-       /* We are in an rcu callback here, therefore we cannot use
-        * batadv_orig_node_free_ref() and its call_rcu():
-        * An rcu_barrier() wouldn't wait for that to finish
-        */
-       batadv_orig_node_free_ref_now(orig_entry->orig_node);
-       kfree(orig_entry);
-}
-
 /**
  * batadv_tt_local_size_mod - change the size by v of the local table identified
  *  by vid
@@ -328,13 +314,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
        batadv_tt_global_size_mod(orig_node, vid, -1);
 }
 
+/**
+ * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ *  queue for free after rcu grace period
+ * @orig_entry: tt orig entry to be free'd
+ */
+static void
+batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
+{
+       batadv_orig_node_free_ref(orig_entry->orig_node);
+       kfree_rcu(orig_entry, rcu);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
        if (!atomic_dec_and_test(&orig_entry->refcount))
                return;
 
-       call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+       batadv_tt_orig_list_entry_release(orig_entry);
 }
 
 /**
index 4ff77a1..3d6c8e2 100644 (file)
@@ -28,6 +28,8 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
+static struct lock_class_key bridge_netdev_addr_lock_key;
+
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -87,6 +89,11 @@ out:
        return NETDEV_TX_OK;
 }
 
+static void br_set_lockdep_class(struct net_device *dev)
+{
+       lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
+}
+
 static int br_dev_init(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -99,6 +106,7 @@ static int br_dev_init(struct net_device *dev)
        err = br_vlan_init(br);
        if (err)
                free_percpu(br->stats);
+       br_set_lockdep_class(dev);
 
        return err;
 }
index 7832d07..ce658ab 100644 (file)
@@ -128,7 +128,10 @@ static void br_stp_start(struct net_bridge *br)
        char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
        char *envp[] = { NULL };
 
-       r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+       if (net_eq(dev_net(br->dev), &init_net))
+               r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+       else
+               r = -ENOENT;
 
        spin_lock_bh(&br->lock);
 
index a42b232..185a339 100644 (file)
@@ -2479,6 +2479,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  *
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
+ *
+ *     Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
  */
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path)
@@ -2493,6 +2495,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                        return ERR_PTR(err);
        }
 
+       BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
+                    sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
+
        SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
        SKB_GSO_CB(skb)->encap_level = 0;
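
Parking the GSO control block at SKB_SGO_CB_OFFSET instead of offset 0 is what lets lower layers (IPCB in the ip_output.c hunk and OVS_CB in the openvswitch hunk further down) keep the first 32 bytes of skb->cb across __skb_gso_segment(); the BUILD_BUG_ON added above guarantees the relocated block still fits in the cb array. The same invariant written as a C11 static_assert, with the sizes quoted for illustration (48 is the current size of skb->cb, and the struct mirrors the skb_gso_cb fields visible in the skbuff.h and dev.c hunks):

#include <assert.h>
#include <stdint.h>

#define DEMO_SKB_CB_SIZE	48	/* sizeof(((struct sk_buff *)0)->cb) */
#define DEMO_SGO_CB_OFFSET	32	/* SKB_SGO_CB_OFFSET */

struct demo_skb_gso_cb {
	int32_t  mac_offset;
	int32_t  encap_level;
	uint16_t csum_start;
};

/* Compile-time guarantee that the relocated GSO cb does not spill
 * past the end of the 48-byte scratch area.
 */
static_assert(DEMO_SGO_CB_OFFSET + sizeof(struct demo_skb_gso_cb) <=
	      DEMO_SKB_CB_SIZE,
	      "GSO cb must fit behind the protocol cb area");

int main(void) { return 0; }
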
 
index f8db403..540066c 100644 (file)
@@ -282,10 +282,11 @@ void dst_release(struct dst_entry *dst)
 {
        if (dst) {
                int newrefcnt;
+               unsigned short nocache = dst->flags & DST_NOCACHE;
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
                WARN_ON(newrefcnt < 0);
-               if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
+               if (!newrefcnt && unlikely(nocache))
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
 }
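
The dst_release() fix above copies the DST_NOCACHE bit into a local before atomic_dec_return(): once the refcount is allowed to reach zero, another CPU may free or recycle the dst, so dst->flags must not be read after the decrement. A toy, single-threaded illustration of the ordering rule (the real object is RCU-freed; demo_dst and the direct free() are inventions of the sketch):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_dst {
	int refcnt;
	unsigned short flags;
#define DEMO_DST_NOCACHE 0x0010
};

/* Safe order: snapshot whatever you still need, then drop the
 * reference.  Reading dst->flags after the decrement, as the old code
 * did, races with a concurrent free on SMP.
 */
static void demo_dst_release(struct demo_dst *dst)
{
	bool nocache = dst->flags & DEMO_DST_NOCACHE;	/* read before the drop */
	int newrefcnt = --dst->refcnt;

	if (!newrefcnt && nocache) {
		printf("freeing uncached dst\n");
		free(dst);
	}
}

int main(void)
{
	struct demo_dst *dst = calloc(1, sizeof(*dst));

	if (!dst)
		return 1;
	dst->refcnt = 1;
	dst->flags = DEMO_DST_NOCACHE;
	demo_dst_release(dst);
	return 0;
}
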
index 0fa2613..238bb3f 100644 (file)
@@ -775,6 +775,11 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
+               case BPF_ALU | BPF_LSH | BPF_K:
+               case BPF_ALU | BPF_RSH | BPF_K:
+                       if (ftest->k >= 32)
+                               return -EINVAL;
+                       break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
index 30addee..838f524 100644 (file)
@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
-               /* Linkage updates. */
-               __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 
                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                        timeo = DCCP_TIMEWAIT_LEN;
 
                inet_twsk_schedule(tw, timeo);
+               /* Linkage updates. */
+               __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
index 00ec8d5..bb96c1c 100644 (file)
@@ -153,13 +153,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        /*
         * Step 2: Hash TW into tcp ehash chain.
         * Notes :
-        * - tw_refcnt is set to 3 because :
+        * - tw_refcnt is set to 4 because :
         * - We have one reference from bhash chain.
         * - We have one reference from ehash chain.
+        * - We have one reference from timer.
+        * - One reference for ourself (our caller will release it).
         * We can use atomic_set() because prior spin_lock()/spin_unlock()
         * committed into memory all tw fields.
         */
-       atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+       atomic_set(&tw->tw_refcnt, 4);
        inet_twsk_add_node_rcu(tw, &ehead->chain);
 
        /* Step 3: Remove SK from hash chain */
@@ -243,7 +245,7 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL(inet_twsk_deschedule);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 {
        /* timeout := RTO * 3.5
         *
@@ -271,12 +273,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
         */
 
        tw->tw_kill = timeo <= 4*HZ;
-       if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
-               atomic_inc(&tw->tw_refcnt);
+       if (!rearm) {
+               BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
                atomic_inc(&tw->tw_dr->tw_count);
+       } else {
+               mod_timer_pending(&tw->tw_timer, jiffies + timeo);
        }
 }
-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family)
index c65b93a..51573f8 100644 (file)
@@ -235,6 +235,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
         * from host network stack.
         */
        features = netif_skb_features(skb);
+       BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
        if (IS_ERR_OR_NULL(segs)) {
                kfree_skb(skb);
@@ -893,7 +894,7 @@ static int __ip_append_data(struct sock *sk,
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-           (sk->sk_type == SOCK_DGRAM)) {
+           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
index 17e7339..fec2907 100644 (file)
@@ -163,9 +163,9 @@ kill_with_rst:
                if (tcp_death_row.sysctl_tw_recycle &&
                    tcptw->tw_ts_recent_stamp &&
                    tcp_tw_remember_stamp(tw))
-                       inet_twsk_schedule(tw, tw->tw_timeout);
+                       inet_twsk_reschedule(tw, tw->tw_timeout);
                else
-                       inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }
 
@@ -203,7 +203,7 @@ kill:
                                return TCP_TW_SUCCESS;
                        }
                }
-               inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+               inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
@@ -253,7 +253,7 @@ kill:
                 * Do not reschedule in the last case.
                 */
                if (paws_reject || th->ack)
-                       inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -324,9 +324,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                } while (0);
 #endif
 
-               /* Linkage updates. */
-               __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-
                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;
@@ -340,6 +337,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                }
 
                inet_twsk_schedule(tw, timeo);
+               /* Linkage updates. */
+               __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
index 17d3566..3e6a472 100644 (file)
@@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
        yeah->fast_count = 0;
        yeah->reno_count = max(yeah->reno_count>>1, 2U);
 
-       return tp->snd_cwnd - reduction;
+       return max_t(int, tp->snd_cwnd - reduction, 2);
 }
 
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
index bff6974..7852608 100644 (file)
@@ -230,7 +230,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
        xfrm_dst_ifdown(dst, dev);
 }
 
-static struct dst_ops xfrm4_dst_ops = {
+static struct dst_ops xfrm4_dst_ops_template = {
        .family =               AF_INET,
        .gc =                   xfrm4_garbage_collect,
        .update_pmtu =          xfrm4_update_pmtu,
@@ -244,7 +244,7 @@ static struct dst_ops xfrm4_dst_ops = {
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
        .family =               AF_INET,
-       .dst_ops =              &xfrm4_dst_ops,
+       .dst_ops =              &xfrm4_dst_ops_template,
        .dst_lookup =           xfrm4_dst_lookup,
        .get_saddr =            xfrm4_get_saddr,
        .decode_session =       _decode_session4,
@@ -266,7 +266,7 @@ static struct ctl_table xfrm4_policy_table[] = {
        { }
 };
 
-static int __net_init xfrm4_net_init(struct net *net)
+static int __net_init xfrm4_net_sysctl_init(struct net *net)
 {
        struct ctl_table *table;
        struct ctl_table_header *hdr;
@@ -294,7 +294,7 @@ err_alloc:
        return -ENOMEM;
 }
 
-static void __net_exit xfrm4_net_exit(struct net *net)
+static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
 {
        struct ctl_table *table;
 
@@ -306,12 +306,44 @@ static void __net_exit xfrm4_net_exit(struct net *net)
        if (!net_eq(net, &init_net))
                kfree(table);
 }
+#else /* CONFIG_SYSCTL */
+static int inline xfrm4_net_sysctl_init(struct net *net)
+{
+       return 0;
+}
+
+static void inline xfrm4_net_sysctl_exit(struct net *net)
+{
+}
+#endif
+
+static int __net_init xfrm4_net_init(struct net *net)
+{
+       int ret;
+
+       memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
+              sizeof(xfrm4_dst_ops_template));
+       ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
+       if (ret)
+               return ret;
+
+       ret = xfrm4_net_sysctl_init(net);
+       if (ret)
+               dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
+
+       return ret;
+}
+
+static void __net_exit xfrm4_net_exit(struct net *net)
+{
+       xfrm4_net_sysctl_exit(net);
+       dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
+}
 
 static struct pernet_operations __net_initdata xfrm4_net_ops = {
        .init   = xfrm4_net_init,
        .exit   = xfrm4_net_exit,
 };
-#endif
 
 static void __init xfrm4_policy_init(void)
 {
@@ -320,13 +352,9 @@ static void __init xfrm4_policy_init(void)
 
 void __init xfrm4_init(void)
 {
-       dst_entries_init(&xfrm4_dst_ops);
-
        xfrm4_state_init();
        xfrm4_policy_init();
        xfrm4_protocol_init();
-#ifdef CONFIG_SYSCTL
        register_pernet_subsys(&xfrm4_net_ops);
-#endif
 }
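
Both the IPv4 hunk above and the IPv6 one below replace the single, globally shared dst_ops with a per-namespace copy: the old static object becomes a *_dst_ops_template, each net namespace gets its own copy (and its own dst entry counter via dst_entries_init/destroy) in the pernet init/exit hooks, and the sysctl handling is split into *_sysctl_init/_exit so the pernet ops are registered even without CONFIG_SYSCTL. A condensed sketch of the template-copy pattern, using generic names rather than the xfrm structures:

#include <stdio.h>
#include <string.h>

struct demo_ops {
	const char *family;
	long entries;		/* stands in for the per-ops dst counter */
};

static const struct demo_ops demo_ops_template = { "inet", 0 };

struct demo_net {
	struct demo_ops ops;	/* every "namespace" owns its own copy */
};

/* Mirrors xfrm4_net_init(): copy the template, then initialise the
 * per-instance counter so namespaces cannot interfere with each other.
 */
static int demo_net_init(struct demo_net *net)
{
	memcpy(&net->ops, &demo_ops_template, sizeof(demo_ops_template));
	net->ops.entries = 0;
	return 0;
}

int main(void)
{
	struct demo_net a, b;

	demo_net_init(&a);
	demo_net_init(&b);
	a.ops.entries++;	/* bumping one namespace ... */
	printf("a=%ld b=%ld\n", a.ops.entries, b.ops.entries);	/* ... leaves the other at 0 */
	return 0;
}
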
 
index a2d6850..f4795b0 100644 (file)
@@ -5267,13 +5267,10 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
                goto out;
        }
 
-       if (!write) {
-               err = snprintf(str, sizeof(str), "%pI6",
-                              &secret->secret);
-               if (err >= sizeof(str)) {
-                       err = -EIO;
-                       goto out;
-               }
+       err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
+       if (err >= sizeof(str)) {
+               err = -EIO;
+               goto out;
        }
 
        err = proc_dostring(&lctl, write, buffer, lenp, ppos);
index 882124e..a8f6986 100644 (file)
@@ -552,7 +552,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)
 
        rcu_read_lock();
        p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
-       if (p && ip6addrlbl_hold(p))
+       if (p && !ip6addrlbl_hold(p))
                p = NULL;
        lseq = ip6addrlbl_table.seq;
        rcu_read_unlock();
index bc09cb9..f50228b 100644 (file)
@@ -1329,7 +1329,7 @@ emsgsize:
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) &&
-           (sk->sk_type == SOCK_DGRAM)) {
+           (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen,
                                          transhdrlen, mtu, flags, rt);
index c1938ad..c1147ac 100644 (file)
@@ -465,8 +465,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
                skb_set_queue_mapping(skb, queue_mapping);
+               rcu_read_lock();
                err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
                               np->tclass);
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index 901ef6f..5266ad2 100644 (file)
@@ -24,7 +24,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
        struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
        if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
-               IP6_ECN_set_ce(inner_iph);
+               IP6_ECN_set_ce(skb, inner_iph);
 }
 
 /* Add encapsulation header.
index f337a90..4fb94f6 100644 (file)
@@ -289,7 +289,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
        xfrm_dst_ifdown(dst, dev);
 }
 
-static struct dst_ops xfrm6_dst_ops = {
+static struct dst_ops xfrm6_dst_ops_template = {
        .family =               AF_INET6,
        .gc =                   xfrm6_garbage_collect,
        .update_pmtu =          xfrm6_update_pmtu,
@@ -303,7 +303,7 @@ static struct dst_ops xfrm6_dst_ops = {
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
        .family =               AF_INET6,
-       .dst_ops =              &xfrm6_dst_ops,
+       .dst_ops =              &xfrm6_dst_ops_template,
        .dst_lookup =           xfrm6_dst_lookup,
        .get_saddr =            xfrm6_get_saddr,
        .decode_session =       _decode_session6,
@@ -336,7 +336,7 @@ static struct ctl_table xfrm6_policy_table[] = {
        { }
 };
 
-static int __net_init xfrm6_net_init(struct net *net)
+static int __net_init xfrm6_net_sysctl_init(struct net *net)
 {
        struct ctl_table *table;
        struct ctl_table_header *hdr;
@@ -364,7 +364,7 @@ err_alloc:
        return -ENOMEM;
 }
 
-static void __net_exit xfrm6_net_exit(struct net *net)
+static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
 {
        struct ctl_table *table;
 
@@ -376,24 +376,52 @@ static void __net_exit xfrm6_net_exit(struct net *net)
        if (!net_eq(net, &init_net))
                kfree(table);
 }
+#else /* CONFIG_SYSCTL */
+static int inline xfrm6_net_sysctl_init(struct net *net)
+{
+       return 0;
+}
+
+static void inline xfrm6_net_sysctl_exit(struct net *net)
+{
+}
+#endif
+
+static int __net_init xfrm6_net_init(struct net *net)
+{
+       int ret;
+
+       memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template,
+              sizeof(xfrm6_dst_ops_template));
+       ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops);
+       if (ret)
+               return ret;
+
+       ret = xfrm6_net_sysctl_init(net);
+       if (ret)
+               dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
+
+       return ret;
+}
+
+static void __net_exit xfrm6_net_exit(struct net *net)
+{
+       xfrm6_net_sysctl_exit(net);
+       dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
+}
 
 static struct pernet_operations xfrm6_net_ops = {
        .init   = xfrm6_net_init,
        .exit   = xfrm6_net_exit,
 };
-#endif
 
 int __init xfrm6_init(void)
 {
        int ret;
 
-       dst_entries_init(&xfrm6_dst_ops);
-
        ret = xfrm6_policy_init();
-       if (ret) {
-               dst_entries_destroy(&xfrm6_dst_ops);
+       if (ret)
                goto out;
-       }
        ret = xfrm6_state_init();
        if (ret)
                goto out_policy;
@@ -402,9 +430,7 @@ int __init xfrm6_init(void)
        if (ret)
                goto out_state;
 
-#ifdef CONFIG_SYSCTL
        register_pernet_subsys(&xfrm6_net_ops);
-#endif
 out:
        return ret;
 out_state:
@@ -416,11 +442,8 @@ out_policy:
 
 void xfrm6_fini(void)
 {
-#ifdef CONFIG_SYSCTL
        unregister_pernet_subsys(&xfrm6_net_ops);
-#endif
        xfrm6_protocol_fini();
        xfrm6_policy_fini();
        xfrm6_state_fini();
-       dst_entries_destroy(&xfrm6_dst_ops);
 }
index 27e1496..b3fe02a 100644 (file)
@@ -337,12 +337,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
-       struct ovs_skb_cb ovs_cb;
        int err;
 
-       ovs_cb = *OVS_CB(skb);
+       BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
-       *OVS_CB(skb) = ovs_cb;
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs == NULL)
@@ -360,7 +358,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
        /* Queue all of the segments. */
        skb = segs;
        do {
-               *OVS_CB(skb) = ovs_cb;
                if (gso_type & SKB_GSO_UDP && skb != segs)
                        key = &later_key;
 
index 32ab87d..11d0b29 100644 (file)
@@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
        struct sockaddr_pn sa;
        u16 len;
 
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return NET_RX_DROP;
+
        /* check we have at least a full Phonet header */
        if (!pskb_pull(skb, sizeof(struct phonethdr)))
                goto out;
index b453270..3c6f6b7 100644 (file)
@@ -666,8 +666,10 @@ static void qdisc_rcu_free(struct rcu_head *head)
 {
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
 
-       if (qdisc_is_percpu_stats(qdisc))
+       if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
+               free_percpu(qdisc->cpu_qstats);
+       }
 
        kfree((char *) qdisc - qdisc->padded);
 }
index fef2acd..ecae556 100644 (file)
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
        int error;
        struct sctp_transport *transport = (struct sctp_transport *) peer;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
        /* Check whether a task is in the sock.  */
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                           transport, GFP_ATOMIC);
 
        if (error)
-               asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_transport_put(transport);
 }
 
@@ -285,11 +286,12 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
                                        sctp_event_timeout_t timeout_type)
 {
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
        int error = 0;
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy: timer %d\n", __func__,
                         timeout_type);
 
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
                           (void *)timeout_type, GFP_ATOMIC);
 
        if (error)
-               asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_association_put(asoc);
 }
 
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
        int error = 0;
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -389,10 +392,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
                           transport, GFP_ATOMIC);
 
         if (error)
-                asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_transport_put(transport);
 }
 
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
                   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_association_put(asoc);
 }
 
index 3ee27b7..e6bb98e 100644 (file)
@@ -4829,7 +4829,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
 
        retval = SCTP_DISPOSITION_CONSUME;
 
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+       if (abort)
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 
        /* Even if we can't send the ABORT due to low memory delete the
         * TCB.  This is a departure from our typical NOMEM handling.
@@ -4966,7 +4967,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
        retval = SCTP_DISPOSITION_CONSUME;
 
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+       if (abort)
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
index a63c2c8..76e6ec6 100644 (file)
@@ -1513,8 +1513,7 @@ static void sctp_close(struct sock *sk, long timeout)
                        struct sctp_chunk *chunk;
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
-                       if (chunk)
-                               sctp_primitive_ABORT(net, asoc, chunk);
+                       sctp_primitive_ABORT(net, asoc, chunk);
                } else
                        sctp_primitive_SHUTDOWN(net, asoc, NULL);
        }
index 26d50c5..3e0fc51 100644 (file)
@@ -320,7 +320,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
        struct ctl_table tbl;
        bool changed = false;
        char *none = "none";
-       char tmp[8];
+       char tmp[8] = {0};
        int ret;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
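
The one-character change above zero-initialises the on-stack tmp[] buffer that the handler later treats as a string, so code that reads it before anything has been written still sees a NUL-terminated buffer rather than stack garbage. A two-line illustration of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char tmp[8] = {0};      /* every byte zero, hence always NUL-terminated */

        /* With a plain "char tmp[8];" the bytes are indeterminate, and passing
         * the buffer to strlen() or printf("%s") would read stack garbage. */
        printf("len=%zu\n", strlen(tmp));       /* prints len=0 */
        return 0;
}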
index a398f62..cb3a01a 100644 (file)
@@ -1481,6 +1481,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
        sock_wfree(skb);
 }
 
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+       struct user_struct *user = current_user();
+
+       if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+               return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+       return false;
+}
+
 #define MAX_RECURSION_LEVEL 4
 
 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
@@ -1489,6 +1504,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        unsigned char max_level = 0;
        int unix_sock_count = 0;
 
+       if (too_many_unix_fds(current))
+               return -ETOOMANYREFS;
+
        for (i = scm->fp->count - 1; i >= 0; i--) {
                struct sock *sk = unix_get_socket(scm->fp->fp[i]);
 
@@ -1510,10 +1528,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        if (!UNIXCB(skb).fp)
                return -ENOMEM;
 
-       if (unix_sock_count) {
-               for (i = scm->fp->count - 1; i >= 0; i--)
-                       unix_inflight(scm->fp->fp[i]);
-       }
+       for (i = scm->fp->count - 1; i >= 0; i--)
+               unix_inflight(scm->fp->fp[i]);
        return max_level;
 }
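
unix_attach_fds() above now refuses to queue more SCM_RIGHTS file descriptors once the sending user already has more in flight than RLIMIT_NOFILE allows, unless the task holds CAP_SYS_RESOURCE or CAP_SYS_ADMIN; as the added comment notes, the counter is read without locking and a small race is tolerated. A userspace sketch of the same shape of check, with a plain counter and an euid test standing in for the kernel's per-user accounting and capability calls:

#include <stdbool.h>
#include <sys/resource.h>
#include <unistd.h>

static unsigned long files_in_flight;   /* updated elsewhere; read racily here */

static bool too_many_fds_in_flight(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
                return false;
        if (files_in_flight > rl.rlim_cur)
                return geteuid() != 0;  /* euid 0 stands in for CAP_SYS_RESOURCE */
        return false;
}

int main(void)
{
        return too_many_fds_in_flight() ? 1 : 0;
}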
 
index a73a226..8fcdc22 100644 (file)
@@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
+       spin_lock(&unix_gc_lock);
+
        if (s) {
                struct unix_sock *u = unix_sk(s);
 
-               spin_lock(&unix_gc_lock);
-
                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
@@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;
-               spin_unlock(&unix_gc_lock);
        }
+       fp->f_cred->user->unix_inflight++;
+       spin_unlock(&unix_gc_lock);
 }
 
 void unix_notinflight(struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
+       spin_lock(&unix_gc_lock);
+
        if (s) {
                struct unix_sock *u = unix_sk(s);
 
-               spin_lock(&unix_gc_lock);
                BUG_ON(list_empty(&u->link));
 
                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
-               spin_unlock(&unix_gc_lock);
        }
+       fp->f_cred->user->unix_inflight--;
+       spin_unlock(&unix_gc_lock);
 }
 
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
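
In unix_inflight()/unix_notinflight() above, the garbage-collection lock is now taken unconditionally: alongside the existing per-socket accounting, the functions also increment and decrement the sending user's unix_inflight count for every file, socket or not, under the same lock. A small pthread sketch of that "one lock, one conditional counter, one unconditional counter" structure; all names are invented:

#include <pthread.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long user_inflight;
static unsigned long unix_tot_inflight;

static void mark_inflight(int is_unix_socket)
{
        pthread_mutex_lock(&gc_lock);
        if (is_unix_socket)
                unix_tot_inflight++;    /* conditional, like the unix_sk() branch */
        user_inflight++;                /* unconditional per-user count */
        pthread_mutex_unlock(&gc_lock);
}

static void unmark_inflight(int is_unix_socket)
{
        pthread_mutex_lock(&gc_lock);
        if (is_unix_socket)
                unix_tot_inflight--;
        user_inflight--;
        pthread_mutex_unlock(&gc_lock);
}

int main(void)
{
        mark_inflight(1);
        unmark_inflight(1);
        return (int)(user_inflight + unix_tot_inflight);        /* 0 */
}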
index fbcedbe..5097dce 100644 (file)
@@ -153,6 +153,8 @@ static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
 {
        struct sk_buff *segs;
 
+       BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
+       BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
        if (IS_ERR(segs))
index 638af06..4cd2076 100644 (file)
@@ -2806,7 +2806,6 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
 
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
-       struct net *net;
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
@@ -2837,26 +2836,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
        }
        spin_unlock(&xfrm_policy_afinfo_lock);
 
-       rtnl_lock();
-       for_each_net(net) {
-               struct dst_ops *xfrm_dst_ops;
-
-               switch (afinfo->family) {
-               case AF_INET:
-                       xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
-                       break;
-#if IS_ENABLED(CONFIG_IPV6)
-               case AF_INET6:
-                       xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
-                       break;
-#endif
-               default:
-                       BUG();
-               }
-               *xfrm_dst_ops = *afinfo->dst_ops;
-       }
-       rtnl_unlock();
-
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@@ -2892,22 +2871,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
 
-static void __net_init xfrm_dst_ops_init(struct net *net)
-{
-       struct xfrm_policy_afinfo *afinfo;
-
-       rcu_read_lock();
-       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
-       if (afinfo)
-               net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
-#if IS_ENABLED(CONFIG_IPV6)
-       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
-       if (afinfo)
-               net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
-#endif
-       rcu_read_unlock();
-}
-
 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
@@ -3056,7 +3019,6 @@ static int __net_init xfrm_net_init(struct net *net)
        rv = xfrm_policy_init(net);
        if (rv < 0)
                goto out_policy;
-       xfrm_dst_ops_init(net);
        rv = xfrm_sysctl_init(net);
        if (rv < 0)
                goto out_sysctl;
index 3d1984e..e00bcd1 100644 (file)
@@ -42,6 +42,7 @@
 
 #ifndef EM_AARCH64
 #define EM_AARCH64     183
+#define R_AARCH64_NONE         0
 #define R_AARCH64_ABS64        257
 #endif
 
@@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_t const offset)
        return 0;
 }
 
+static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
+static int make_nop_arm64(void *map, size_t const offset)
+{
+       uint32_t *ptr;
+
+       ptr = map + offset;
+       /* bl <_mcount> is 0x94000000 before relocation */
+       if (*ptr != 0x94000000)
+               return -1;
+
+       /* Convert to nop */
+       ulseek(fd_map, offset, SEEK_SET);
+       uwrite(fd_map, ideal_nop, 4);
+       return 0;
+}
+
 /*
  * Get the whole file as a programming convenience in order to avoid
  * malloc+lseek+read+free of many pieces.  If successful, then mmap
@@ -353,7 +370,12 @@ do_file(char const *const fname)
                         altmcount = "__gnu_mcount_nc";
                         break;
        case EM_AARCH64:
-                        reltype = R_AARCH64_ABS64; gpfx = '_'; break;
+                       reltype = R_AARCH64_ABS64;
+                       make_nop = make_nop_arm64;
+                       rel_type_nop = R_AARCH64_NONE;
+                       ideal_nop = ideal_nop4_arm64;
+                       gpfx = '_';
+                       break;
        case EM_IA_64:   reltype = R_IA64_IMM64;   gpfx = '_'; break;
        case EM_METAG:   reltype = R_METAG_ADDR32;
                         altmcount = "_mcount_wrapper";
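
The new make_nop_arm64() above patches a not-yet-relocated "bl <_mcount>" call site (opcode word 0x94000000) into an A64 NOP, whose byte image in a little-endian ELF is the ideal_nop4_arm64[] sequence 1f 20 03 d5. A tiny self-contained check of those encodings:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* The A64 NOP instruction encodes as 0xd503201f; stored little-endian
         * it is the byte sequence used by ideal_nop4_arm64[] above. */
        static const unsigned char nop_bytes[4] = {0x1f, 0x20, 0x03, 0xd5};
        uint32_t word;

        memcpy(&word, nop_bytes, sizeof(word));
        assert(word == UINT32_C(0xd503201f));   /* holds on a little-endian host */

        /* A BL has 0b100101 in the top six bits; with a zero imm26 (i.e. before
         * relocation) that is exactly the 0x94000000 the code above tests for. */
        assert((UINT32_C(0x25) << 26) == UINT32_C(0x94000000));
        return 0;
}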
index 49b582a..b9897e2 100644 (file)
@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr,
 
                if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
                        if (make_nop)
-                               ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
+                               ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
                        if (warn_on_notrace_sect && !once) {
                                printf("Section %s has mcount callers being ignored\n",
                                       txtname);
index 826470d..96e2486 100755 (executable)
@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
 
 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+    # See comment in the sparc64 section for why we use '\w'.
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
 
     if ($bits == 64) {
index 196a6fe..a85d455 100644 (file)
@@ -1405,6 +1405,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
                return -EFAULT;
        if (tlv.length < sizeof(unsigned int) * 2)
                return -EINVAL;
+       if (!tlv.numid)
+               return -EINVAL;
        down_read(&card->controls_rwsem);
        kctl = snd_ctl_find_numid(card, tlv.numid);
        if (kctl == NULL) {
index 886be7d..38514ed 100644 (file)
@@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
        struct snd_hrtimer *stime = t->private_data;
 
        atomic_set(&stime->running, 0);
-       hrtimer_cancel(&stime->hrt);
+       hrtimer_try_to_cancel(&stime->hrt);
        hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
                      HRTIMER_MODE_REL);
        atomic_set(&stime->running, 1);
@@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
 {
        struct snd_hrtimer *stime = t->private_data;
        atomic_set(&stime->running, 0);
+       hrtimer_try_to_cancel(&stime->hrt);
        return 0;
 }
 
index b48b434..9630e9f 100644 (file)
@@ -255,10 +255,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
        if (! (runtime = substream->runtime))
                return -ENOTTY;
 
-       /* only fifo_size is different, so just copy all */
-       data = memdup_user(data32, sizeof(*data32));
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       /* only fifo_size (RO from userspace) is different, so just copy all */
+       if (copy_from_user(data, data32, sizeof(*data32))) {
+               err = -EFAULT;
+               goto error;
+       }
 
        if (refine)
                err = snd_pcm_hw_refine(substream, data);
index edbdab8..bd47414 100644 (file)
@@ -1962,7 +1962,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
                 * No restrictions so for a user client we can clear
                 * the whole fifo
                 */
-               if (client->type == USER_CLIENT)
+               if (client->type == USER_CLIENT && client->data.user.fifo)
                        snd_seq_fifo_clear(client->data.user.fifo);
        }
 
index 81f7c10..6517590 100644 (file)
@@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
        struct snd_seq_port_info *data;
        mm_segment_t fs;
 
-       data = memdup_user(data32, sizeof(*data32));
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
-       if (get_user(data->flags, &data32->flags) ||
+       if (copy_from_user(data, data32, sizeof(*data32)) ||
+           get_user(data->flags, &data32->flags) ||
            get_user(data->time_queue, &data32->time_queue))
                goto error;
        data->kernel = NULL;
index a0cda38..77ec214 100644 (file)
@@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
 static void queue_delete(struct snd_seq_queue *q)
 {
        /* stop and release the timer */
+       mutex_lock(&q->timer_mutex);
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
+       mutex_unlock(&q->timer_mutex);
        /* wait until access free */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
index a9a1a04..a419878 100644 (file)
@@ -65,6 +65,7 @@ struct snd_timer_user {
        int qtail;
        int qused;
        int queue_size;
+       bool disconnected;
        struct snd_timer_read *queue;
        struct snd_timer_tread *tqueue;
        spinlock_t qlock;
@@ -73,7 +74,7 @@ struct snd_timer_user {
        struct timespec tstamp;         /* trigger tstamp */
        wait_queue_head_t qchange_sleep;
        struct fasync_struct *fasync;
-       struct mutex tread_sem;
+       struct mutex ioctl_lock;
 };
 
 /* list of timers */
@@ -215,11 +216,13 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
                    slave->slave_id == master->slave_id) {
                        list_move_tail(&slave->open_list, &master->slave_list_head);
                        spin_lock_irq(&slave_active_lock);
+                       spin_lock(&master->timer->lock);
                        slave->master = master;
                        slave->timer = master->timer;
                        if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
                                list_add_tail(&slave->active_list,
                                              &master->slave_active_head);
+                       spin_unlock(&master->timer->lock);
                        spin_unlock_irq(&slave_active_lock);
                }
        }
@@ -288,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
                mutex_unlock(&register_mutex);
                return -ENOMEM;
        }
+       /* take a card refcount for safe disconnection */
+       if (timer->card)
+               get_device(&timer->card->card_dev);
        timeri->slave_class = tid->dev_sclass;
        timeri->slave_id = slave_id;
        if (list_empty(&timer->open_list_head) && timer->hw.open)
@@ -346,15 +352,21 @@ int snd_timer_close(struct snd_timer_instance *timeri)
                    timer->hw.close)
                        timer->hw.close(timer);
                /* remove slave links */
+               spin_lock_irq(&slave_active_lock);
+               spin_lock(&timer->lock);
                list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
                                         open_list) {
-                       spin_lock_irq(&slave_active_lock);
-                       _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
                        list_move_tail(&slave->open_list, &snd_timer_slave_list);
                        slave->master = NULL;
                        slave->timer = NULL;
-                       spin_unlock_irq(&slave_active_lock);
+                       list_del_init(&slave->ack_list);
+                       list_del_init(&slave->active_list);
                }
+               spin_unlock(&timer->lock);
+               spin_unlock_irq(&slave_active_lock);
+               /* release a card refcount for safe disconnection */
+               if (timer->card)
+                       put_device(&timer->card->card_dev);
                mutex_unlock(&register_mutex);
        }
  out:
@@ -441,9 +453,12 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
 
        spin_lock_irqsave(&slave_active_lock, flags);
        timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
-       if (timeri->master)
+       if (timeri->master && timeri->timer) {
+               spin_lock(&timeri->timer->lock);
                list_add_tail(&timeri->active_list,
                              &timeri->master->slave_active_head);
+               spin_unlock(&timeri->timer->lock);
+       }
        spin_unlock_irqrestore(&slave_active_lock, flags);
        return 1; /* delayed start */
 }
@@ -467,6 +482,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
        timer = timeri->timer;
        if (timer == NULL)
                return -EINVAL;
+       if (timer->card && timer->card->shutdown)
+               return -ENODEV;
        spin_lock_irqsave(&timer->lock, flags);
        timeri->ticks = timeri->cticks = ticks;
        timeri->pticks = 0;
@@ -489,6 +506,8 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
                if (!keep_flag) {
                        spin_lock_irqsave(&slave_active_lock, flags);
                        timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+                       list_del_init(&timeri->ack_list);
+                       list_del_init(&timeri->active_list);
                        spin_unlock_irqrestore(&slave_active_lock, flags);
                }
                goto __end;
@@ -499,6 +518,10 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
        spin_lock_irqsave(&timer->lock, flags);
        list_del_init(&timeri->ack_list);
        list_del_init(&timeri->active_list);
+       if (timer->card && timer->card->shutdown) {
+               spin_unlock_irqrestore(&timer->lock, flags);
+               return 0;
+       }
        if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
            !(--timer->running)) {
                timer->hw.stop(timer);
@@ -561,6 +584,8 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
        timer = timeri->timer;
        if (! timer)
                return -EINVAL;
+       if (timer->card && timer->card->shutdown)
+               return -ENODEV;
        spin_lock_irqsave(&timer->lock, flags);
        if (!timeri->cticks)
                timeri->cticks = 1;
@@ -624,6 +649,9 @@ static void snd_timer_tasklet(unsigned long arg)
        unsigned long resolution, ticks;
        unsigned long flags;
 
+       if (timer->card && timer->card->shutdown)
+               return;
+
        spin_lock_irqsave(&timer->lock, flags);
        /* now process all callbacks */
        while (!list_empty(&timer->sack_list_head)) {
@@ -664,6 +692,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
        if (timer == NULL)
                return;
 
+       if (timer->card && timer->card->shutdown)
+               return;
+
        spin_lock_irqsave(&timer->lock, flags);
 
        /* remember the current resolution */
@@ -694,7 +725,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
                } else {
                        ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
                        if (--timer->running)
-                               list_del(&ti->active_list);
+                               list_del_init(&ti->active_list);
                }
                if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
                    (ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -874,11 +905,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
        return 0;
 }
 
+/* just for reference in snd_timer_dev_disconnect() below */
+static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
+                                    int event, struct timespec *tstamp,
+                                    unsigned long resolution);
+
 static int snd_timer_dev_disconnect(struct snd_device *device)
 {
        struct snd_timer *timer = device->device_data;
+       struct snd_timer_instance *ti;
+
        mutex_lock(&register_mutex);
        list_del_init(&timer->device_list);
+       /* wake up pending sleepers */
+       list_for_each_entry(ti, &timer->open_list_head, open_list) {
+               /* FIXME: better to have a ti.disconnect() op */
+               if (ti->ccallback == snd_timer_user_ccallback) {
+                       struct snd_timer_user *tu = ti->callback_data;
+
+                       tu->disconnected = true;
+                       wake_up(&tu->qchange_sleep);
+               }
+       }
        mutex_unlock(&register_mutex);
        return 0;
 }
@@ -889,6 +937,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
        unsigned long resolution = 0;
        struct snd_timer_instance *ti, *ts;
 
+       if (timer->card && timer->card->shutdown)
+               return;
        if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
                return;
        if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
@@ -1047,6 +1097,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,
 
        mutex_lock(&register_mutex);
        list_for_each_entry(timer, &snd_timer_list, device_list) {
+               if (timer->card && timer->card->shutdown)
+                       continue;
                switch (timer->tmr_class) {
                case SNDRV_TIMER_CLASS_GLOBAL:
                        snd_iprintf(buffer, "G%i: ", timer->tmr_device);
@@ -1253,7 +1305,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        spin_lock_init(&tu->qlock);
        init_waitqueue_head(&tu->qchange_sleep);
-       mutex_init(&tu->tread_sem);
+       mutex_init(&tu->ioctl_lock);
        tu->ticks = 1;
        tu->queue_size = 128;
        tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
@@ -1273,8 +1325,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
        if (file->private_data) {
                tu = file->private_data;
                file->private_data = NULL;
+               mutex_lock(&tu->ioctl_lock);
                if (tu->timeri)
                        snd_timer_close(tu->timeri);
+               mutex_unlock(&tu->ioctl_lock);
                kfree(tu->queue);
                kfree(tu->tqueue);
                kfree(tu);
@@ -1512,7 +1566,6 @@ static int snd_timer_user_tselect(struct file *file,
        int err = 0;
 
        tu = file->private_data;
-       mutex_lock(&tu->tread_sem);
        if (tu->timeri) {
                snd_timer_close(tu->timeri);
                tu->timeri = NULL;
@@ -1556,7 +1609,6 @@ static int snd_timer_user_tselect(struct file *file,
        }
 
       __err:
-       mutex_unlock(&tu->tread_sem);
        return err;
 }
 
@@ -1769,7 +1821,7 @@ enum {
        SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
 };
 
-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
 {
        struct snd_timer_user *tu;
@@ -1786,17 +1838,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
        {
                int xarg;
 
-               mutex_lock(&tu->tread_sem);
-               if (tu->timeri) {       /* too late */
-                       mutex_unlock(&tu->tread_sem);
+               if (tu->timeri) /* too late */
                        return -EBUSY;
-               }
-               if (get_user(xarg, p)) {
-                       mutex_unlock(&tu->tread_sem);
+               if (get_user(xarg, p))
                        return -EFAULT;
-               }
                tu->tread = xarg ? 1 : 0;
-               mutex_unlock(&tu->tread_sem);
                return 0;
        }
        case SNDRV_TIMER_IOCTL_GINFO:
@@ -1829,6 +1875,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
        return -ENOTTY;
 }
 
+static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+       struct snd_timer_user *tu = file->private_data;
+       long ret;
+
+       mutex_lock(&tu->ioctl_lock);
+       ret = __snd_timer_user_ioctl(file, cmd, arg);
+       mutex_unlock(&tu->ioctl_lock);
+       return ret;
+}
+
 static int snd_timer_user_fasync(int fd, struct file * file, int on)
 {
        struct snd_timer_user *tu;
@@ -1866,6 +1924,10 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                        remove_wait_queue(&tu->qchange_sleep, &wait);
 
+                       if (tu->disconnected) {
+                               err = -ENODEV;
+                               break;
+                       }
                        if (signal_pending(current)) {
                                err = -ERESTARTSYS;
                                break;
@@ -1915,6 +1977,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
        mask = 0;
        if (tu->qused)
                mask |= POLLIN | POLLRDNORM;
+       if (tu->disconnected)
+               mask |= POLLERR;
 
        return mask;
 }
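
The timer changes above pin the owning card across open/close, mark the user instance as disconnected so readers and pollers wake up with -ENODEV/POLLERR, and replace the narrow tread_sem with an ioctl_lock taken around every ioctl so user requests can no longer race with release(). A pthread sketch of just that last serialisation pattern, with illustrative types that are not the ALSA API:

#include <pthread.h>

struct timer_user { pthread_mutex_t ioctl_lock; void *timeri; };

static long do_ioctl_locked(struct timer_user *tu, unsigned int cmd)
{
        (void)cmd;
        return tu->timeri ? 0 : -1;     /* placeholder body */
}

static long do_ioctl(struct timer_user *tu, unsigned int cmd)
{
        long ret;

        pthread_mutex_lock(&tu->ioctl_lock);    /* serialise against release/tselect */
        ret = do_ioctl_locked(tu, cmd);
        pthread_mutex_unlock(&tu->ioctl_lock);
        return ret;
}

int main(void)
{
        struct timer_user tu = { .timeri = 0 };

        pthread_mutex_init(&tu.ioctl_lock, NULL);
        return do_ioctl(&tu, 0) == -1 ? 0 : 1;
}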
index 6cf470c..af7ed66 100644 (file)
@@ -1,4 +1,4 @@
 snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \
                  bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \
                  bebob_focusrite.o bebob_maudio.o bebob.o
-obj-m += snd-bebob.o
+obj-$(CONFIG_SND_BEBOB) += snd-bebob.o
index 9ef228e..55b4be9 100644 (file)
@@ -1,3 +1,3 @@
 snd-dice-objs := dice-transaction.o dice-stream.o dice-proc.o dice-midi.o \
                 dice-pcm.o dice-hwdep.o dice.o
-obj-m += snd-dice.o
+obj-$(CONFIG_SND_DICE) += snd-dice.o
index 0c74408..15ef7f7 100644 (file)
@@ -1,4 +1,4 @@
 snd-fireworks-objs := fireworks_transaction.o fireworks_command.o \
                      fireworks_stream.o fireworks_proc.o fireworks_midi.o \
                      fireworks_pcm.o fireworks_hwdep.o fireworks.o
-obj-m += snd-fireworks.o
+obj-$(CONFIG_SND_FIREWORKS) += snd-fireworks.o
index a926850..06ff50f 100644 (file)
@@ -1,3 +1,3 @@
 snd-oxfw-objs := oxfw-command.o oxfw-stream.o oxfw-control.o oxfw-pcm.o \
                 oxfw-proc.o oxfw-midi.o oxfw-hwdep.o oxfw.o
-obj-m += snd-oxfw.o
+obj-$(CONFIG_SND_OXFW) += snd-oxfw.o
index 44dfc7b..09920ba 100644 (file)
@@ -329,6 +329,7 @@ enum {
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
+        AZX_DCAPS_NO_64BIT |\
         AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
 
 /*
@@ -839,6 +840,36 @@ static int azx_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
 
+#ifdef CONFIG_PM_SLEEP
+/* put codec down to D3 at hibernation for Intel SKL+;
+ * otherwise BIOS may still access the codec and screw up the driver
+ */
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+
+static int azx_freeze_noirq(struct device *dev)
+{
+       struct pci_dev *pci = to_pci_dev(dev);
+
+       if (IS_SKL_PLUS(pci))
+               pci_set_power_state(pci, PCI_D3hot);
+
+       return 0;
+}
+
+static int azx_thaw_noirq(struct device *dev)
+{
+       struct pci_dev *pci = to_pci_dev(dev);
+
+       if (IS_SKL_PLUS(pci))
+               pci_set_power_state(pci, PCI_D0);
+
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_PM
 static int azx_runtime_suspend(struct device *dev)
 {
@@ -939,6 +970,10 @@ static int azx_runtime_idle(struct device *dev)
 
 static const struct dev_pm_ops azx_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
+#ifdef CONFIG_PM_SLEEP
+       .freeze_noirq = azx_freeze_noirq,
+       .thaw_noirq = azx_thaw_noirq,
+#endif
        SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
 };
 
@@ -1937,9 +1972,17 @@ out_free:
 static void azx_remove(struct pci_dev *pci)
 {
        struct snd_card *card = pci_get_drvdata(pci);
+       struct azx *chip;
+       struct hda_intel *hda;
+
+       if (card) {
+               /* flush the pending probing work */
+               chip = card->private_data;
+               hda = container_of(chip, struct hda_intel, chip);
+               flush_work(&hda->probe_work);
 
-       if (card)
                snd_card_free(card);
+       }
 }
 
 static void azx_shutdown(struct pci_dev *pci)
@@ -1976,6 +2019,11 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        { PCI_DEVICE(0x8086, 0x8d21),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       /* Lewisburg */
+       { PCI_DEVICE(0x8086, 0xa1f0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+       { PCI_DEVICE(0x8086, 0xa270),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Lynx Point-LP */
        { PCI_DEVICE(0x8086, 0x9c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
@@ -2156,11 +2204,13 @@ static const struct pci_device_id azx_ids[] = {
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+         AZX_DCAPS_NO_64BIT |
          AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
 #else
        /* this entry seems still valid -- i.e. without emu20kx chip */
        { PCI_DEVICE(0x1102, 0x0009),
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+         AZX_DCAPS_NO_64BIT |
          AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
 #endif
        /* CM8888 */
index 57bb5a5..8189f02 100644 (file)
@@ -111,6 +111,7 @@ struct alc_spec {
        void (*power_hook)(struct hda_codec *codec);
 #endif
        void (*shutup)(struct hda_codec *codec);
+       void (*reboot_notify)(struct hda_codec *codec);
 
        int init_amp;
        int codec_variant;      /* flag for other variants */
@@ -773,6 +774,25 @@ static inline void alc_shutup(struct hda_codec *codec)
                snd_hda_shutup_pins(codec);
 }
 
+static void alc_reboot_notify(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (spec && spec->reboot_notify)
+               spec->reboot_notify(codec);
+       else
+               alc_shutup(codec);
+}
+
+/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
+static void alc_d3_at_reboot(struct hda_codec *codec)
+{
+       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+       snd_hda_codec_write(codec, codec->core.afg, 0,
+                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+       msleep(10);
+}
+
 #define alc_free       snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -818,7 +838,7 @@ static const struct hda_codec_ops alc_patch_ops = {
        .suspend = alc_suspend,
        .check_power_status = snd_hda_gen_check_power_status,
 #endif
-       .reboot_notify = alc_shutup,
+       .reboot_notify = alc_reboot_notify,
 };
 
 
@@ -1767,10 +1787,12 @@ enum {
        ALC889_FIXUP_MBA11_VREF,
        ALC889_FIXUP_MBA21_VREF,
        ALC889_FIXUP_MP11_VREF,
+       ALC889_FIXUP_MP41_VREF,
        ALC882_FIXUP_INV_DMIC,
        ALC882_FIXUP_NO_PRIMARY_HP,
        ALC887_FIXUP_ASUS_BASS,
        ALC887_FIXUP_BASS_CHMAP,
+       ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1854,7 +1876,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
 {
        struct alc_spec *spec = codec->spec;
-       static hda_nid_t nids[2] = { 0x14, 0x15 };
+       static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
        int i;
 
        if (action != HDA_FIXUP_ACT_INIT)
@@ -1932,6 +1954,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action);
+static void alc_fixup_disable_aamix(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
        [ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2142,6 +2166,12 @@ static const struct hda_fixup alc882_fixups[] = {
                .chained = true,
                .chain_id = ALC885_FIXUP_MACPRO_GPIO,
        },
+       [ALC889_FIXUP_MP41_VREF] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc889_fixup_mbp_vref,
+               .chained = true,
+               .chain_id = ALC885_FIXUP_MACPRO_GPIO,
+       },
        [ALC882_FIXUP_INV_DMIC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_inv_dmic,
@@ -2163,6 +2193,10 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_bass_chmap,
        },
+       [ALC882_FIXUP_DISABLE_AAMIX] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+       },
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2220,7 +2254,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
-       SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
+       SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
        SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
@@ -2230,6 +2264,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -4194,6 +4229,8 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->shutup = alc_no_shutup; /* reduce click noise */
+               spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
                snd_hda_apply_pincfgs(codec, pincfgs);
@@ -4525,6 +4562,7 @@ enum {
        ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
        ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC292_FIXUP_TPT440_DOCK,
+       ALC292_FIXUP_TPT440,
        ALC283_FIXUP_BXBT2807_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -4993,6 +5031,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
        },
+       [ALC292_FIXUP_TPT440] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+               .chained = true,
+               .chain_id = ALC292_FIXUP_TPT440_DOCK,
+       },
        [ALC283_FIXUP_BXBT2807_MIC] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -5107,6 +5151,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
@@ -5116,6 +5161,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+       SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5227,12 +5273,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
-       SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -5322,6 +5369,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
        {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
        {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+       {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
        {}
 };
 
@@ -5447,6 +5495,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x21, 0x02211040}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC255_STANDARD_PINS,
+               {0x12, 0x90a60170},
+               {0x14, 0x90171130},
+               {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60170},
                {0x14, 0x90170140},
                {0x17, 0x40000000},
@@ -6456,6 +6508,7 @@ static const struct hda_fixup alc662_fixups[] = {
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
        SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
@@ -6473,6 +6526,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
        SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
+       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
        SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
index b1bc667..8e7d4c0 100644 (file)
@@ -702,6 +702,7 @@ static bool hp_bnb2011_with_dock(struct hda_codec *codec)
 static bool hp_blike_system(u32 subsystem_id)
 {
        switch (subsystem_id) {
+       case 0x103c1473: /* HP ProBook 6550b */
        case 0x103c1520:
        case 0x103c1521:
        case 0x103c1523:
@@ -3109,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec,
        spec->gpio_led = 0x08;
 }
 
+static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin)
+{
+       unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+       /* count line-out, too, as BIOS sets often so */
+       return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE &&
+               (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
+                get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT);
+}
+
+static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin)
+{
+       unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+       /* It was changed in the BIOS to just satisfy MS DTM.
+        * Lets turn it back into slaved HP
+        */
+       pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) |
+               (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT);
+       pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) |
+               0x1f;
+       snd_hda_codec_set_pincfg(codec, pin, pin_cfg);
+}
 
 static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
                                   const struct hda_fixup *fix, int action)
@@ -3118,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
        if (action != HDA_FIXUP_ACT_PRE_PROBE)
                return;
 
-       if (hp_blike_system(codec->core.subsystem_id)) {
-               unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
-               if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
-                       get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER  ||
-                       get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) {
-                       /* It was changed in the BIOS to just satisfy MS DTM.
-                        * Lets turn it back into slaved HP
-                        */
-                       pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE))
-                                       | (AC_JACK_HP_OUT <<
-                                               AC_DEFCFG_DEVICE_SHIFT);
-                       pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC
-                                                       | AC_DEFCFG_SEQUENCE)))
-                                                               | 0x1f;
-                       snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg);
-               }
+       /* when both output A and F are assigned, these are supposedly
+        * dock and built-in headphones; fix both pin configs
+        */
+       if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) {
+               fixup_hp_headphone(codec, 0x0a);
+               fixup_hp_headphone(codec, 0x0f);
        }
 
        if (find_mute_led_cfg(codec, 1))
index 2306ccf..77c963c 100644 (file)
@@ -741,10 +741,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
        {
                /* change to/from double-speed: reset the DAC (if available) */
                snd_rme96_reset_dac(rme96);
+               return 1; /* need to restore volume */
        } else {
                writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
+               return 0;
        }
-       return 0;
 }
 
 static int
@@ -980,6 +981,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
        int err, rate, dummy;
+       bool apply_dac_volume = false;
 
        runtime->dma_area = (void __force *)(rme96->iobase +
                                             RME96_IO_PLAY_BUFFER);
@@ -993,24 +995,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
        {
                 /* slave clock */
                 if ((int)params_rate(params) != rate) {
-                       spin_unlock_irq(&rme96->lock);
-                       return -EIO;                    
-                }
-       } else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
-               spin_unlock_irq(&rme96->lock);
-               return err;
-       }
-       if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
-               spin_unlock_irq(&rme96->lock);
-               return err;
+                       err = -EIO;
+                       goto error;
+               }
+       } else {
+               err = snd_rme96_playback_setrate(rme96, params_rate(params));
+               if (err < 0)
+                       goto error;
+               apply_dac_volume = err > 0; /* need to restore volume later? */
        }
+
+       err = snd_rme96_playback_setformat(rme96, params_format(params));
+       if (err < 0)
+               goto error;
        snd_rme96_setframelog(rme96, params_channels(params), 1);
        if (rme96->capture_periodsize != 0) {
                if (params_period_size(params) << rme96->playback_frlog !=
                    rme96->capture_periodsize)
                {
-                       spin_unlock_irq(&rme96->lock);
-                       return -EBUSY;
+                       err = -EBUSY;
+                       goto error;
                }
        }
        rme96->playback_periodsize =
@@ -1021,9 +1025,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
                rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
                writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
        }
+
+       err = 0;
+ error:
        spin_unlock_irq(&rme96->lock);
-               
-       return 0;
+       if (apply_dac_volume) {
+               usleep_range(3000, 10000);
+               snd_rme96_apply_dac_volume(rme96);
+       }
+
+       return err;
 }
 
 static int
index ee91edc..1319189 100644 (file)
@@ -1354,7 +1354,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        bool reconfig;
        unsigned int aif_tx_state, aif_rx_state;
 
-       if (params_rate(params) % 8000)
+       if (params_rate(params) % 4000)
                rates = &arizona_44k1_bclk_rates[0];
        else
                rates = &arizona_48k_bclk_rates[0];
index c5f35a0..3ad7f5b 100644 (file)
@@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
 
-static const int deemph_settings[] = { 0, 32000, 44100, 48000 };
+static const struct {
+       int rate;
+       unsigned int val;
+} deemph_settings[] = {
+       { 0,     ES8328_DACCONTROL6_DEEMPH_OFF },
+       { 32000, ES8328_DACCONTROL6_DEEMPH_32k },
+       { 44100, ES8328_DACCONTROL6_DEEMPH_44_1k },
+       { 48000, ES8328_DACCONTROL6_DEEMPH_48k },
+};
 
 static int es8328_set_deemph(struct snd_soc_codec *codec)
 {
@@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec)
         * rate.
         */
        if (es8328->deemph) {
-               best = 1;
-               for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
-                       if (abs(deemph_settings[i] - es8328->playback_fs) <
-                           abs(deemph_settings[best] - es8328->playback_fs))
+               best = 0;
+               for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) {
+                       if (abs(deemph_settings[i].rate - es8328->playback_fs) <
+                           abs(deemph_settings[best].rate - es8328->playback_fs))
                                best = i;
                }
 
-               val = best << 1;
+               val = deemph_settings[best].val;
        } else {
-               val = 0;
+               val = ES8328_DACCONTROL6_DEEMPH_OFF;
        }
 
        dev_dbg(codec->dev, "Set deemphasis %d\n", val);
 
-       return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val);
+       return snd_soc_update_bits(codec, ES8328_DACCONTROL6,
+                       ES8328_DACCONTROL6_DEEMPH_MASK, val);
 }
 
 static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
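
The rewritten deemphasis selection above pairs each supported sample rate with its register value and picks the entry closest to the stream rate, instead of deriving the register field from the loop index. A standalone version of that nearest-rate lookup, with placeholder register values in place of the ES8328_DACCONTROL6_DEEMPH_* constants:

#include <stdio.h>
#include <stdlib.h>

static const struct { int rate; unsigned int val; } deemph[] = {
        { 0, 0 }, { 32000, 1 }, { 44100, 2 }, { 48000, 3 },
};

static unsigned int pick_deemph(int fs)
{
        size_t i, best = 0;

        for (i = 1; i < sizeof(deemph) / sizeof(deemph[0]); i++)
                if (abs(deemph[i].rate - fs) < abs(deemph[best].rate - fs))
                        best = i;       /* keep the entry nearest to fs */
        return deemph[best].val;
}

int main(void)
{
        printf("%u\n", pick_deemph(44100));     /* prints 2 */
        return 0;
}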
index cb36afe..156c748 100644 (file)
@@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
 #define ES8328_DACCONTROL6_CLICKFREE (1 << 3)
 #define ES8328_DACCONTROL6_DAC_INVR (1 << 4)
 #define ES8328_DACCONTROL6_DAC_INVL (1 << 5)
+#define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6)
 #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6)
 #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6)
 #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)
index 118b003..154c1a2 100644 (file)
@@ -365,8 +365,8 @@ static struct reg_default wm8962_reg[] = {
        { 16924, 0x0059 },   /* R16924 - HDBASS_PG_1 */
        { 16925, 0x999A },   /* R16925 - HDBASS_PG_0 */
 
-       { 17048, 0x0083 },   /* R17408 - HPF_C_1 */
-       { 17049, 0x98AD },   /* R17409 - HPF_C_0 */
+       { 17408, 0x0083 },   /* R17408 - HPF_C_1 */
+       { 17409, 0x98AD },   /* R17409 - HPF_C_0 */
 
        { 17920, 0x007F },   /* R17920 - ADCL_RETUNE_C1_1 */
        { 17921, 0xFFFF },   /* R17921 - ADCL_RETUNE_C1_0 */
index ff0e464..88317c1 100644 (file)
@@ -575,6 +575,7 @@ static const struct regmap_config wm8974_regmap = {
        .max_register = WM8974_MONOMIX,
        .reg_defaults = wm8974_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults),
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int wm8974_probe(struct snd_soc_codec *codec)
index 23c91fa..76dd8c6 100644 (file)
@@ -221,8 +221,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
 
        /* wait for XDATA to be cleared */
        cnt = 0;
-       while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
-                ~XRDATA) && (cnt < 100000))
+       while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) &&
+              (cnt < 100000))
                cnt++;
 
        /* Release TX state machine */
index 8c7dc51..f7a0cb7 100644 (file)
@@ -214,7 +214,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
                RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8),
                RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc),
                RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0),
-               RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4),
+               RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1d4),
                RSND_GEN_M_REG(SRC_SWRSR,       0x200,  0x40),
                RSND_GEN_M_REG(SRC_SRCIR,       0x204,  0x40),
                RSND_GEN_M_REG(SRC_ADINR,       0x214,  0x40),
index 025c38f..1874cf0 100644 (file)
@@ -623,6 +623,7 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
        struct snd_pcm *be_pcm;
        char new_name[64];
        int ret = 0, direction = 0;
+       int playback = 0, capture = 0;
 
        if (rtd->num_codecs > 1) {
                dev_err(rtd->card->dev, "Multicodec not supported for compressed stream\n");
@@ -634,11 +635,27 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
                        rtd->dai_link->stream_name, codec_dai->name, num);
 
        if (codec_dai->driver->playback.channels_min)
+               playback = 1;
+       if (codec_dai->driver->capture.channels_min)
+               capture = 1;
+
+       capture = capture && cpu_dai->driver->capture.channels_min;
+       playback = playback && cpu_dai->driver->playback.channels_min;
+
+       /*
+        * Compress devices are unidirectional so only one of the directions
+        * should be set, check for that (xor)
+        */
+       if (playback + capture != 1) {
+               dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
+                               playback, capture);
+               return -EINVAL;
+       }
+
+       if(playback)
                direction = SND_COMPRESS_PLAYBACK;
-       else if (codec_dai->driver->capture.channels_min)
-               direction = SND_COMPRESS_CAPTURE;
        else
-               return -EINVAL;
+               direction = SND_COMPRESS_CAPTURE;
 
        compr = kzalloc(sizeof(*compr), GFP_KERNEL);
        if (compr == NULL) {
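Editor's note on the hunk above: compress devices must be unidirectional, so the patch derives 0/1 flags for playback and capture from both the codec and CPU DAIs and rejects the link unless exactly one flag is set; `playback + capture != 1` is an integer exclusive-or over the two flags. A small user-space sketch of the same check, assuming illustrative direction constants and a hypothetical pick_direction() wrapper (not the ASoC API):

    #include <stdio.h>

    enum { COMPRESS_PLAYBACK, COMPRESS_CAPTURE };   /* illustrative values only */

    /* Return the single supported direction, or -1 if the device is not
     * strictly unidirectional (neither or both directions reported). */
    static int pick_direction(int codec_play, int codec_cap,
                              int cpu_play, int cpu_cap)
    {
            int playback = codec_play && cpu_play;
            int capture  = codec_cap && cpu_cap;

            if (playback + capture != 1)            /* xor on 0/1 flags */
                    return -1;

            return playback ? COMPRESS_PLAYBACK : COMPRESS_CAPTURE;
    }

    int main(void)
    {
            printf("%d\n", pick_direction(1, 0, 1, 0));  /* playback */
            printf("%d\n", pick_direction(1, 1, 1, 1));  /* -1: both set */
            return 0;
    }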
index cd8ed2e..f9a9752 100644 (file)
@@ -1336,6 +1336,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
                }
        }
 
+       snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl);
+
        range = (cval->max - cval->min) / cval->res;
        /*
         * Are there devices with volume range more than 255? I use a bit more
index 6a803ef..ddca654 100644 (file)
@@ -348,13 +348,6 @@ static struct usbmix_name_map bose_companion5_map[] = {
        { 0 }   /* terminator */
 };
 
-/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
-static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
-static struct usbmix_name_map dragonfly_1_2_map[] = {
-       { 7, NULL, .dB = &dragonfly_1_2_dB },
-       { 0 }   /* terminator */
-};
-
 /*
  * Control map entries
  */
@@ -470,11 +463,6 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x05a7, 0x1020),
                .map = bose_companion5_map,
        },
-       {
-               /* Dragonfly DAC 1.2 */
-               .id = USB_ID(0x21b4, 0x0081),
-               .map = dragonfly_1_2_map,
-       },
        { 0 } /* terminator */
 };
 
index 337c317..db9547d 100644 (file)
@@ -37,6 +37,7 @@
 #include <sound/control.h>
 #include <sound/hwdep.h>
 #include <sound/info.h>
+#include <sound/tlv.h>
 
 #include "usbaudio.h"
 #include "mixer.h"
@@ -802,7 +803,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
                return 0;
 
        kcontrol->private_value &= ~(0xff << 24);
-       kcontrol->private_value |= newval;
+       kcontrol->private_value |= (unsigned int)newval << 24;
        err = snd_ni_update_cur_val(list);
        return err < 0 ? err : 1;
 }
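Editor's note on the hunk above: the put handler clears bits 24-31 of private_value but previously OR-ed the new byte back in at bit 0, corrupting the low bits instead of updating the intended field; the fix shifts newval into the field that was cleared. A hedged stand-alone sketch of the same replace-a-byte-field pattern (the helper name is hypothetical):

    #include <stdint.h>
    #include <assert.h>

    /* Replace the byte stored in bits 24-31 of 'val' with 'newval',
     * leaving the low 24 bits untouched. */
    static uint32_t set_high_byte(uint32_t val, uint8_t newval)
    {
            val &= ~(0xffu << 24);                  /* clear the old byte */
            val |= (uint32_t)newval << 24;          /* insert the new one */
            return val;
    }

    int main(void)
    {
            assert(set_high_byte(0x11abcdefu, 0x7f) == 0x7fabcdefu);
            return 0;
    }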
@@ -1843,3 +1844,39 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
        }
 }
 
+static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+                                        struct snd_kcontrol *kctl)
+{
+       /* Approximation using 10 ranges based on output measurement on hw v1.2.
+        * This seems close to the cubic mapping e.g. alsamixer uses. */
+       static const DECLARE_TLV_DB_RANGE(scale,
+                0,  1, TLV_DB_MINMAX_ITEM(-5300, -4970),
+                2,  5, TLV_DB_MINMAX_ITEM(-4710, -4160),
+                6,  7, TLV_DB_MINMAX_ITEM(-3884, -3710),
+                8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560),
+               15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324),
+               17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031),
+               20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393),
+               27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032),
+               32, 40, TLV_DB_MINMAX_ITEM(-968, -490),
+               41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
+       );
+
+       usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n");
+       kctl->tlv.p = scale;
+       kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+       kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+}
+
+void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+                                 struct usb_mixer_elem_info *cval, int unitid,
+                                 struct snd_kcontrol *kctl)
+{
+       switch (mixer->chip->usb_id) {
+       case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+               if (unitid == 7 && cval->min == 0 && cval->max == 50)
+                       snd_dragonfly_quirk_db_scale(mixer, kctl);
+               break;
+       }
+}
+
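Editor's note on the hunk above: instead of a per-device dB map in mixer_maps.c, the DragonFly now gets a static TLV table attached to the volume control; each DECLARE_TLV_DB_RANGE entry maps a span of raw control values (0-50) to a min/max pair in 0.01 dB units, which user space interprets roughly as a piecewise-linear curve. A minimal sketch of that lookup outside ALSA, reusing the table values from the hunk (the interpolation helper is hypothetical, not part of the quirk):

    #include <stdio.h>

    struct db_range { int lo, hi, min_cdb, max_cdb; };     /* cdb = 0.01 dB */

    /* Piecewise approximation copied from the DragonFly quirk above. */
    static const struct db_range scale[] = {
            { 0,  1, -5300, -4970 }, { 2,  5, -4710, -4160 },
            { 6,  7, -3884, -3710 }, { 8, 14, -3443, -2560 },
            {15, 16, -2475, -2324 }, {17, 19, -2228, -2031 },
            {20, 26, -1910, -1393 }, {27, 31, -1322, -1032 },
            {32, 40,  -968,  -490 }, {41, 50,  -441,     0 },
    };

    /* Interpolate a raw control value (0..50) to 0.01 dB. */
    static int raw_to_cdb(int raw)
    {
            for (unsigned int i = 0; i < sizeof(scale) / sizeof(scale[0]); i++) {
                    const struct db_range *r = &scale[i];

                    if (raw < r->lo || raw > r->hi)
                            continue;
                    if (r->hi == r->lo)
                            return r->min_cdb;
                    return r->min_cdb + (r->max_cdb - r->min_cdb) *
                           (raw - r->lo) / (r->hi - r->lo);
            }
            return 0;
    }

    int main(void)
    {
            printf("raw 25 -> %d (0.01 dB units)\n", raw_to_cdb(25));
            return 0;
    }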
index bdbfab0..177c329 100644 (file)
@@ -9,5 +9,9 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
 void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
                                    int unitid);
 
+void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+                                 struct usb_mixer_elem_info *cval, int unitid,
+                                 struct snd_kcontrol *kctl);
+
 #endif /* SND_USB_MIXER_QUIRKS_H */
 
index eef9b8e..fb9a8a5 100644 (file)
@@ -1122,6 +1122,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
                return true;
        }
        return false;
@@ -1265,6 +1266,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
        case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
        case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+       case USB_ID(0x22d8, 0x0416): /* OPPO HA-1 */
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;