git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Jan 2018 18:14:09 +0000 (10:14 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Jan 2018 18:14:09 +0000 (10:14 -0800)
Pull locking fixes from Ingo Molnar:
 "No functional effects intended: removes leftovers from recent lockdep
  and refcounts work"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/refcounts: Remove stale comment from the ARCH_HAS_REFCOUNT Kconfig entry
  locking/lockdep: Remove cross-release leftovers
  locking/Documentation: Remove stale crossrelease_fullstack parameter

160 files changed:
Documentation/networking/index.rst
Documentation/networking/msg_zerocopy.rst
MAINTAINERS
arch/mips/kernel/cps-vec.S
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/riscv/configs/defconfig
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/irqflags.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/ptrace.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/include/asm/uaccess.h
arch/riscv/include/asm/unistd.h
arch/riscv/include/asm/vdso-syscalls.h [deleted file]
arch/riscv/include/uapi/asm/syscalls.h [new file with mode: 0644]
arch/riscv/kernel/entry.S
arch/riscv/kernel/process.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso/flush_icache.S
arch/riscv/mm/fault.c
arch/sh/boards/mach-se/770x/setup.c
arch/sh/include/mach-se/mach/se.h
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/xen-ops.h
block/blk-core.c
block/blk-mq.c
block/blk.h
crypto/algapi.c
drivers/block/loop.c
drivers/block/rbd.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/gpu/drm/vc4/vc4_v3d.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/nldev.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/s3cmci.c
drivers/net/can/flexcan.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/vxcan.c
drivers/net/dsa/b53/b53_common.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/geneve.c
drivers/net/macvlan.c
drivers/net/phy/mdio-sun4i.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp-bus.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/pmc.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/xen-netfront.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/fcloop.c
drivers/of/of_mdio.c
drivers/platform/x86/wmi.c
drivers/xen/gntdev.c
include/linux/bpf.h
include/linux/sh_eth.h
include/net/sctp/structs.h
include/net/vxlan.h
include/uapi/linux/if_ether.h
include/uapi/linux/libc-compat.h
include/uapi/linux/netfilter/nf_conntrack_common.h
init/Kconfig
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/sockmap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
lib/test_bpf.c
net/8021q/vlan.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/caif/cfcnfg.c
net/caif/cfctrl.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/rtnetlink.c
net/core/sock_diag.c
net/core/sysctl_net_core.c
net/ipv4/raw.c
net/ipv6/exthdrs.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/mac80211/rx.c
net/netfilter/nf_tables_api.c
net/rds/rdma.c
net/sched/act_gact.c
net/sched/act_mirred.c
net/sctp/input.c
net/sctp/socket.c
net/sctp/stream.c
net/sctp/transport.c
net/socket.c
net/tipc/group.c
net/wireless/nl80211.c
sound/core/oss/pcm_oss.c
sound/core/oss/pcm_plugin.c
sound/core/pcm_lib.c
sound/core/pcm_native.c
sound/drivers/aloop.c
tools/testing/selftests/bpf/test_align.c

index 66e62086624501fe76c170f77efc1cbc0844032f..7d4b15977d61201eb8b265293d3d95a6b042c7e6 100644 (file)
@@ -9,6 +9,7 @@ Contents:
    batman-adv
    kapi
    z8530book
+   msg_zerocopy
 
 .. only::  subproject
 
@@ -16,4 +17,3 @@ Contents:
    =======
 
    * :ref:`genindex`
-
index 77f6d7e25cfda65ac36e0c47bb2f8bdacc2fee2a..291a012649678035b5d0fdc306904093ceedf610 100644 (file)
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
        if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
                error(1, errno, "setsockopt zerocopy");
 
+Setting the socket option only works when the socket is in its initial
+(TCP_CLOSED) state.  Trying to set the option for a socket returned by accept(),
+for example, will lead to an EBUSY error. In this case, the option should be set
+on the listening socket, and it will be inherited by the accepted sockets.
 
 Transmission
 ------------
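
The paragraph added above describes an inheritance pattern worth showing concretely. A hedged sketch, in the style of the document's earlier snippet — the SO_ZEROCOPY fallback define below is an assumption for older headers (the value is 60 on most architectures):

       #include <errno.h>
       #include <error.h>
       #include <netinet/in.h>
       #include <sys/socket.h>

       #ifndef SO_ZEROCOPY
       #define SO_ZEROCOPY 60  /* assumed fallback for older headers */
       #endif

       int one = 1;
       int lfd = socket(AF_INET, SOCK_STREAM, 0);

       /* Works here: lfd is still in its initial (TCP_CLOSED) state. */
       if (setsockopt(lfd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
               error(1, errno, "setsockopt zerocopy");

       /* ... bind(), listen() ... */

       /* Setting the option on cfd directly would fail with EBUSY;
        * instead cfd inherits SO_ZEROCOPY from lfd. */
       int cfd = accept(lfd, NULL, NULL);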
index 95c3fa1f520fba6e36899271b5dc0795d5c4bf5c..d76af75a653afcfeece6f27bc1290313528f5a98 100644 (file)
@@ -10134,7 +10134,7 @@ F:      drivers/irqchip/irq-ompic.c
 F:     drivers/irqchip/irq-or1k-*
 
 OPENVSWITCH
-M:     Pravin Shelar <pshelar@nicira.com>
+M:     Pravin B Shelar <pshelar@ovn.org>
 L:     netdev@vger.kernel.org
 L:     dev@openvswitch.org
 W:     http://openvswitch.org
index c7ed26029cbbcf0ed7458a145d490957a59abb4e..e68e6e04063a7e0d9f30c50ebac3b082ac7b8b88 100644 (file)
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
        has_mt  t0, 3f
 
        .set    push
+       .set    MIPS_ISA_LEVEL_RAW
        .set    mt
 
        /* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
 #elif defined(CONFIG_MIPS_MT)
 
        .set    push
+       .set    MIPS_ISA_LEVEL_RAW
        .set    mt
 
        /* If the core doesn't support MT then return */
index 45d0b6b037eeb6a985318df16df6f73a7f1dad9c..57028d49c202aeab53a1b4ccd5d21b3b3052cedb 100644 (file)
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
        struct task_struct *t;
        int max_users;
 
+       /* If nothing to change, return right away, successfully.  */
+       if (value == mips_get_process_fp_mode(task))
+               return 0;
+
+       /* Only accept a mode change if 64-bit FP enabled for o32.  */
+       if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+               return -EOPNOTSUPP;
+
+       /* And only for o32 tasks.  */
+       if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+               return -EOPNOTSUPP;
+
        /* Check the value is valid */
        if (value & ~known_bits)
                return -EOPNOTSUPP;
index efbd8df8b6652e7a81baed64134858fd8b3d733a..0b23b1ad99e65f1e21d1810340f9dd306483b8d3 100644 (file)
@@ -419,63 +419,160 @@ static int gpr64_set(struct task_struct *target,
 
 #endif /* CONFIG_64BIT */
 
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
+ * correspond 1:1 to buffer slots.  Only general registers are copied.
+ */
+static int fpr_get_fpa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      void **kbuf, void __user **ubuf)
+{
+       return user_regset_copyout(pos, count, kbuf, ubuf,
+                                  &target->thread.fpu,
+                                  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots.  Only general
+ * registers are copied.
+ */
+static int fpr_get_msa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      void **kbuf, void __user **ubuf)
+{
+       unsigned int i;
+       u64 fpr_val;
+       int err;
+
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS; i++) {
+               fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+               err = user_regset_copyout(pos, count, kbuf, ubuf,
+                                         &fpr_val, i * sizeof(elf_fpreg_t),
+                                         (i + 1) * sizeof(elf_fpreg_t));
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ */
 static int fpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
-       unsigned i;
+       const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
        int err;
-       u64 fpr_val;
 
-       /* XXX fcr31  */
+       if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+               err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
+       else
+               err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
+       if (err)
+               return err;
 
-       if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.fpu,
-                                          0, sizeof(elf_fpregset_t));
+       err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &target->thread.fpu.fcr31,
+                                 fcr31_pos, fcr31_pos + sizeof(u32));
 
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
-               err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                         &fpr_val, i * sizeof(elf_fpreg_t),
-                                         (i + 1) * sizeof(elf_fpreg_t));
+       return err;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
+ * context's general register slots.  Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      const void **kbuf, const void __user **ubuf)
+{
+       return user_regset_copyin(pos, count, kbuf, ubuf,
+                                 &target->thread.fpu,
+                                 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots.  Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+                      unsigned int *pos, unsigned int *count,
+                      const void **kbuf, const void __user **ubuf)
+{
+       unsigned int i;
+       u64 fpr_val;
+       int err;
+
+       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+       for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+               err = user_regset_copyin(pos, count, kbuf, ubuf,
+                                        &fpr_val, i * sizeof(elf_fpreg_t),
+                                        (i + 1) * sizeof(elf_fpreg_t));
                if (err)
                        return err;
+               set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
        }
 
        return 0;
 }
 
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
 static int fpr_set(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
 {
-       unsigned i;
+       const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+       u32 fcr31;
        int err;
-       u64 fpr_val;
 
-       /* XXX fcr31  */
+       BUG_ON(count % sizeof(elf_fpreg_t));
+
+       if (pos + count > sizeof(elf_fpregset_t))
+               return -EIO;
 
        init_fp_ctx(target);
 
-       if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.fpu,
-                                         0, sizeof(elf_fpregset_t));
+       if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+               err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+       else
+               err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+       if (err)
+               return err;
 
-       BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
-       for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
+       if (count > 0) {
                err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                        &fpr_val, i * sizeof(elf_fpreg_t),
-                                        (i + 1) * sizeof(elf_fpreg_t));
+                                        &fcr31,
+                                        fcr31_pos, fcr31_pos + sizeof(u32));
                if (err)
                        return err;
-               set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+
+               ptrace_setfcr31(target, fcr31);
        }
 
-       return 0;
+       return err;
 }
 
 enum mips_regset {
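
For orientation, the fcr31_pos arithmetic above implies a fixed NT_PRFPREG buffer layout: NUM_FPU_REGS (32) slots of sizeof(elf_fpreg_t) (8 bytes) for the general FP registers, then the 32-bit FCSR. A hedged sketch, assuming ELF_NFPREG == 33 — the struct name and the meaning of the trailing padding are illustrative, not taken from kernel headers:

       #include <stdint.h>

       struct nt_prfpreg_layout {              /* hypothetical name */
               uint64_t fp_regs[32];   /* lower 64 bits only in the MSA case */
               uint32_t fcr31;         /* copied separately at offset 256 */
               uint32_t pad;           /* rest of the final 8-byte slot */
       };                              /* 264 bytes == sizeof(elf_fpregset_t) */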
index 29ebe2fd58674c59f27803a5426e4d2414f39418..a93d719edc906887b0f4bf412b2b76ac04babfec 100644 (file)
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                gpte->may_read = true;
                gpte->may_write = true;
                gpte->page_size = MMU_PAGE_4K;
+               gpte->wimg = HPTE_R_M;
 
                return 0;
        }
index 966097232d2147bbcd79df41354a5f973fb9b7c1..b73dbc9e797da76bf4305a73972c4af925fbd70d 100644 (file)
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
        u32 order;
 
        /* These fields protected by kvm->lock */
+
+       /* Possible values and their usage:
+        *  <0     an error occurred during allocation,
+        *  -EBUSY allocation is in progress,
+        *  0      allocation succeeded.
+        */
        int error;
-       bool prepare_done;
 
-       /* Private to the work thread, until prepare_done is true,
-        * then protected by kvm->resize_hpt_sem */
+       /* Private to the work thread, until error != -EBUSY,
+        * then protected by kvm->lock.
+        */
        struct kvm_hpt_info hpt;
 };
 
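
The comment above turns resize->error into a small state machine. A hedged reader-side sketch of how the three cases decode (the helper name is hypothetical):

       static const char *resize_state(const struct kvm_resize_hpt *resize)
       {
               if (resize->error == -EBUSY)
                       return "allocation in progress";
               if (resize->error < 0)
                       return "allocation failed";
               return "allocation succeeded";  /* error == 0 */
       }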
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
                 * Reset all the reverse-mapping chains for all memslots
                 */
                kvmppc_rmap_reset(kvm);
-               /* Ensure that each vcpu will flush its TLB on next entry. */
-               cpumask_setall(&kvm->arch.need_tlb_flush);
                err = 0;
                goto out;
        }
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
        kvmppc_set_hpt(kvm, &info);
 
 out:
+       if (err == 0)
+               /* Ensure that each vcpu will flush its TLB on next entry. */
+               cpumask_setall(&kvm->arch.need_tlb_flush);
+
        mutex_unlock(&kvm->lock);
        return err;
 }
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-       BUG_ON(kvm->arch.resize_hpt != resize);
+       if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+               return;
 
        if (!resize)
                return;
 
-       if (resize->hpt.virt)
-               kvmppc_free_hpt(&resize->hpt);
+       if (resize->error != -EBUSY) {
+               if (resize->hpt.virt)
+                       kvmppc_free_hpt(&resize->hpt);
+               kfree(resize);
+       }
 
-       kvm->arch.resize_hpt = NULL;
-       kfree(resize);
+       if (kvm->arch.resize_hpt == resize)
+               kvm->arch.resize_hpt = NULL;
 }
 
 static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
                                                     struct kvm_resize_hpt,
                                                     work);
        struct kvm *kvm = resize->kvm;
-       int err;
+       int err = 0;
 
-       resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
-                        resize->order);
-
-       err = resize_hpt_allocate(resize);
+       if (WARN_ON(resize->error != -EBUSY))
+               return;
 
        mutex_lock(&kvm->lock);
 
+       /* Request is still current? */
+       if (kvm->arch.resize_hpt == resize) {
+               /* We may request large allocations here:
+        * do not hold kvm->lock across a potentially long sleep.
+                */
+               mutex_unlock(&kvm->lock);
+
+               resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+                                resize->order);
+
+               err = resize_hpt_allocate(resize);
+
+       /* -EBUSY is reserved as the "allocation in progress" marker,
+        * so resize_hpt_allocate() must not return it.
+                */
+               if (WARN_ON(err == -EBUSY))
+                       err = -EINPROGRESS;
+
+               mutex_lock(&kvm->lock);
+               /* It is possible that kvm->arch.resize_hpt != resize
+                * after we grab kvm->lock again.
+                */
+       }
+
        resize->error = err;
-       resize->prepare_done = true;
+
+       if (kvm->arch.resize_hpt != resize)
+               resize_hpt_release(kvm, resize);
 
        mutex_unlock(&kvm->lock);
 }
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 
        if (resize) {
                if (resize->order == shift) {
-                       /* Suitable resize in progress */
-                       if (resize->prepare_done) {
-                               ret = resize->error;
-                               if (ret != 0)
-                                       resize_hpt_release(kvm, resize);
-                       } else {
+                       /* Suitable resize in progress? */
+                       ret = resize->error;
+                       if (ret == -EBUSY)
                                ret = 100; /* estimated time in ms */
-                       }
+                       else if (ret)
+                               resize_hpt_release(kvm, resize);
 
                        goto out;
                }
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
                ret = -ENOMEM;
                goto out;
        }
+
+       resize->error = -EBUSY;
        resize->order = shift;
        resize->kvm = kvm;
        INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
        if (!resize || (resize->order != shift))
                goto out;
 
-       ret = -EBUSY;
-       if (!resize->prepare_done)
-               goto out;
-
        ret = resize->error;
-       if (ret != 0)
+       if (ret)
                goto out;
 
        ret = resize_hpt_rehash(resize);
-       if (ret != 0)
+       if (ret)
                goto out;
 
        resize_hpt_pivot(resize);
index d0dc8624198f8e4663800c4ca603aea98d4ba128..7deaeeb14b9358d8a4e6f1107cd42bba0605a264 100644 (file)
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
+#define HPTE_R_M   _PAGE_COHERENT
 #endif
 
 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
+               pte.wimg = HPTE_R_M;
        }
 
        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..47dacf06c679f3eff22c0a6ab0f619d9b4019ab5 100644 (file)
@@ -0,0 +1,75 @@
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RAS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_CRYPTO_USER_API_HASH=y
index 0d64bc9f4f91562872b36231e5b593887498cde2..3c7a2c97e377a5af2923c8e7d96cf04e57e66892 100644 (file)
 #include <linux/const.h>
 
 /* Status register flags */
-#define SR_IE   _AC(0x00000002, UL) /* Interrupt Enable */
-#define SR_PIE  _AC(0x00000020, UL) /* Previous IE */
-#define SR_PS   _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM  _AC(0x00040000, UL) /* Supervisor may access User Memory */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE        _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
 
 #define SR_FS           _AC(0x00006000, UL) /* Floating-point Status */
 #define SR_FS_OFF       _AC(0x00000000, UL)
index a82ce599b639813c9ed3ad697f217cb09a6538e1..b269451e7e85577c76cb718283806fdfb6f76532 100644 (file)
@@ -21,8 +21,6 @@
 
 #include <linux/types.h>
 
-#ifdef CONFIG_MMU
-
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 /*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 extern void iounmap(volatile void __iomem *addr);
 
-#endif /* CONFIG_MMU */
-
 /* Generic IO read/write.  These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
index 6fdc860d7f84fd69648af547ff8984946eac93fd..07a3c6d5706ff8fd8f34acbe6c5832fc6e638676 100644 (file)
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-       csr_set(sstatus, SR_IE);
+       csr_set(sstatus, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-       csr_clear(sstatus, SR_IE);
+       csr_clear(sstatus, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-       return csr_read_clear(sstatus, SR_IE);
+       return csr_read_clear(sstatus, SR_SIE);
 }
 
 /* test flags */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-       return !(flags & SR_IE);
+       return !(flags & SR_SIE);
 }
 
 /* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       csr_set(sstatus, flags & SR_IE);
+       csr_set(sstatus, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */
index 2cbd92ed1629c00df42b8ceaffd2250b6de7413a..16301966d65b6fd8a54614d12f0815866d19d948 100644 (file)
@@ -20,8 +20,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
-
 /* Page Upper Directory not used in RISC-V */
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
        /* No page table caches to initialize */
 }
 
-#endif /* CONFIG_MMU */
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
index 93b8956e25e46714a5bd789ed0ea15ebb2a687f2..2c5df945d43c9abfdfd197a61d8c92ce20e48133 100644 (file)
@@ -66,7 +66,7 @@ struct pt_regs {
 #define REG_FMT "%08lx"
 #endif
 
-#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
+#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
 
 
 /* Helpers for working with the instruction pointer */
index 715b0f10af580811dfca3ba067819e07fe1e7374..7b9c24ebdf5293ac73b8155b2c394fc9244a712d 100644 (file)
@@ -15,8 +15,6 @@
 #ifndef _ASM_RISCV_TLBFLUSH_H
 #define _ASM_RISCV_TLBFLUSH_H
 
-#ifdef CONFIG_MMU
-
 #include <linux/mm_types.h>
 
 /*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
        flush_tlb_all();
 }
 
-#endif /* CONFIG_MMU */
-
 #endif /* _ASM_RISCV_TLBFLUSH_H */
index 27b90d64814b8d0422d0316f8e765b6e97081a47..14b0b22fb57875dfe920685265a79da317c517e0 100644 (file)
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
  * call.
  */
 
-#ifdef CONFIG_MMU
 #define __get_user_asm(insn, x, ptr, err)                      \
 do {                                                           \
        uintptr_t __tmp;                                        \
@@ -153,13 +152,11 @@ do {                                                              \
        __disable_user_access();                                \
        (x) = __x;                                              \
 } while (0)
-#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_64BIT
 #define __get_user_8(x, ptr, err) \
        __get_user_asm("ld", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __get_user_8(x, ptr, err)                              \
 do {                                                           \
        u32 __user *__ptr = (u32 __user *)(ptr);                \
@@ -193,7 +190,6 @@ do {                                                                \
        (x) = (__typeof__(x))((__typeof__((x)-(x)))(            \
                (((u64)__hi << 32) | __lo)));                   \
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */
 
 
@@ -267,8 +263,6 @@ do {                                                                \
                ((x) = 0, -EFAULT);                             \
 })
 
-
-#ifdef CONFIG_MMU
 #define __put_user_asm(insn, x, ptr, err)                      \
 do {                                                           \
        uintptr_t __tmp;                                        \
@@ -292,14 +286,11 @@ do {                                                              \
                : "rJ" (__x), "i" (-EFAULT));                   \
        __disable_user_access();                                \
 } while (0)
-#endif /* CONFIG_MMU */
-
 
 #ifdef CONFIG_64BIT
 #define __put_user_8(x, ptr, err) \
        __put_user_asm("sd", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __put_user_8(x, ptr, err)                              \
 do {                                                           \
        u32 __user *__ptr = (u32 __user *)(ptr);                \
@@ -329,7 +320,6 @@ do {                                                                \
                : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
        __disable_user_access();                                \
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */
 
 
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
  * will set "err" to -EFAULT, while successful accesses return the previous
  * value.
  */
-#ifdef CONFIG_MMU
 #define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)     \
 ({                                                             \
        __typeof__(ptr) __ptr = (ptr);                          \
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
        (err) = __err;                                          \
        __ret;                                                  \
 })
-#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_UACCESS_H */
index 9f250ed007cd816e523898a10f32ec6814da6892..2f704a5c4196e30fd2a9a9c3607eeeed3d6f019f 100644 (file)
@@ -14,3 +14,4 @@
 #define __ARCH_HAVE_MMU
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
+#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h
deleted file mode 100644 (file)
index a2ccf18..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
-#define _ASM_RISCV_VDSO_SYSCALLS_H
-
-#ifdef CONFIG_SMP
-
-/* These syscalls are only used by the vDSO and are not in the uapi. */
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
-#endif
-
-#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
new file mode 100644 (file)
index 0000000..818655b
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM__UAPI__SYSCALLS_H
+#define _ASM__UAPI__SYSCALLS_H
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range; it's just
+ * there for forwards compatibility.
+ */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
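
A hedged userspace sketch of invoking the syscall declared above via syscall(2). The fallback number assumes the generic syscall table, where __NR_arch_specific_syscall is 244, and flags == 0 is assumed to mean "flush for all threads":

       #include <sys/syscall.h>
       #include <unistd.h>

       #ifndef __NR_riscv_flush_icache
       #define __NR_riscv_flush_icache (244 + 15)  /* __NR_arch_specific_syscall + 15 */
       #endif

       /* Flush the instruction cache for [start, end) on every hart the
        * kernel may schedule this process on. */
       static void flush_icache(void *start, void *end)
       {
               syscall(__NR_riscv_flush_icache, start, end, 0UL);
       }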
index 20ee86f782a93b19779236f39a40daa3d9563ac5..7404ec222406290ed03c671a3b4463c61aa49c12 100644 (file)
@@ -196,7 +196,7 @@ handle_syscall:
        addi s2, s2, 0x4
        REG_S s2, PT_SEPC(sp)
        /* System calls run with interrupts enabled */
-       csrs sstatus, SR_IE
+       csrs sstatus, SR_SIE
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
 
 ret_from_exception:
        REG_L s0, PT_SSTATUS(sp)
-       csrc sstatus, SR_IE
-       andi s0, s0, SR_PS
+       csrc sstatus, SR_SIE
+       andi s0, s0, SR_SPP
        bnez s0, restore_all
 
 resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
        bnez s1, work_resched
 work_notifysig:
        /* Handle pending signals and notify-resume requests */
-       csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+       csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
        move a0, sp /* pt_regs */
        move a1, s0 /* current_thread_info->flags */
        tail do_notify_resume
index 0d90dcc1fbd36559823e48db2dc15ac4319b063d..d74d4adf2d54f94a4dff39c14d7953b8c0fdbab8 100644 (file)
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
 void start_thread(struct pt_regs *regs, unsigned long pc,
        unsigned long sp)
 {
-       regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+       regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
        regs->sepc = pc;
        regs->sp = sp;
        set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                const register unsigned long gp __asm__ ("gp");
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gp = gp;
-               childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+               childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
 
                p->thread.ra = (unsigned long)ret_from_kernel_thread;
                p->thread.s[0] = usp; /* fn */
index a5bd6401f95e6988a376406f02b7e39cb7c1352e..ade52b903a43f2b27546fdd216b87b7057bc95d4 100644 (file)
@@ -23,5 +23,4 @@
 void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
 };
index b0fbad74e873ea9fff553424b975502b589d7097..023e4d4aef588e139fafcbc9e3fb1ddef0482575 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/linkage.h>
 #include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
 
        .text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
index df2ca3c65048f9347781b05309c9552eaa7d0844..0713f3c67ab4242a2e54430331044724bf57f289 100644 (file)
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
                goto vmalloc_fault;
 
        /* Enable interrupts if they were enabled in the parent context. */
-       if (likely(regs->sstatus & SR_PIE))
+       if (likely(regs->sstatus & SR_SPIE))
                local_irq_enable();
 
        /*
index 77c35350ee774d4fa26ed5bc57b70d9d03c7bcab..412326d59e6fcebd51469163f6476b88753aaa4c 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/sh_eth.h>
 #include <mach-se/mach/se.h>
 #include <mach-se/mach/mrshpc.h>
 #include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
 #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
        defined(CONFIG_CPU_SUBTYPE_SH7712)
 /* SH771X Ethernet driver */
+static struct sh_eth_plat_data sh_eth_plat = {
+       .phy = PHY_ID,
+       .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
 static struct resource sh_eth0_resources[] = {
        [0] = {
                .start = SH_ETH0_BASE,
-               .end = SH_ETH0_BASE + 0x1B8,
+               .end = SH_ETH0_BASE + 0x1B8 - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
+               .start = SH_TSU_BASE,
+               .end = SH_TSU_BASE + 0x200 - 1,
+               .flags = IORESOURCE_MEM,
+       },
+       [2] = {
                .start = SH_ETH0_IRQ,
                .end = SH_ETH0_IRQ,
                .flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
        .name = "sh771x-ether",
        .id = 0,
        .dev = {
-               .platform_data = PHY_ID,
+               .platform_data = &sh_eth_plat,
        },
        .num_resources = ARRAY_SIZE(sh_eth0_resources),
        .resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
 static struct resource sh_eth1_resources[] = {
        [0] = {
                .start = SH_ETH1_BASE,
-               .end = SH_ETH1_BASE + 0x1B8,
+               .end = SH_ETH1_BASE + 0x1B8 - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
+               .start = SH_TSU_BASE,
+               .end = SH_TSU_BASE + 0x200 - 1,
+               .flags = IORESOURCE_MEM,
+       },
+       [2] = {
                .start = SH_ETH1_IRQ,
                .end = SH_ETH1_IRQ,
                .flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
        .name = "sh771x-ether",
        .id = 1,
        .dev = {
-               .platform_data = PHY_ID,
+               .platform_data = &sh_eth_plat,
        },
        .num_resources = ARRAY_SIZE(sh_eth1_resources),
        .resource = sh_eth1_resources,
index 4246ef9b07a346ed590be0cd3651f5a370ee700a..aa83fe1ff0b124c002e375e2ce5a83c7c853e29c 100644 (file)
 /* Base address */
 #define SH_ETH0_BASE 0xA7000000
 #define SH_ETH1_BASE 0xA7000400
+#define SH_TSU_BASE  0xA7000800
 /* PHY ID */
 #if defined(CONFIG_CPU_SUBTYPE_SH7710)
 # define PHY_ID 0x00
index c4deb1f34faa6ce7ffe6bcaaebddc3e87b2a9a69..2b8eb4da4d0823bdcdf7bde1feb44132d0113cde 100644 (file)
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
        if (unlikely(!lapic_in_kernel(vcpu) ||
-                    kvm_event_needs_reinjection(vcpu)))
+                    kvm_event_needs_reinjection(vcpu) ||
+                    vcpu->arch.exception.pending))
                return false;
 
        if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
 
 int kvm_mmu_module_init(void)
 {
+       int ret = -ENOMEM;
+
        kvm_mmu_clear_all_pte_masks();
 
        pte_list_desc_cache = kmem_cache_create("pte_list_desc",
                                            sizeof(struct pte_list_desc),
                                            0, SLAB_ACCOUNT, NULL);
        if (!pte_list_desc_cache)
-               goto nomem;
+               goto out;
 
        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, SLAB_ACCOUNT, NULL);
        if (!mmu_page_header_cache)
-               goto nomem;
+               goto out;
 
        if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
-               goto nomem;
+               goto out;
 
-       register_shrinker(&mmu_shrinker);
+       ret = register_shrinker(&mmu_shrinker);
+       if (ret)
+               goto out;
 
        return 0;
 
-nomem:
+out:
        mmu_destroy_caches();
-       return -ENOMEM;
+       return ret;
 }
 
 /*
index bb31c801f1fc9d45c1dc629bd9c056769584530a..3158dac87f8227c528a586b8acdcc9bcb0357dc2 100644 (file)
@@ -361,7 +361,6 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *c, *h;
        struct nested_state *g;
-       u32 h_intercept_exceptions;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -372,14 +371,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
        h = &svm->nested.hsave->control;
        g = &svm->nested;
 
-       /* No need to intercept #UD if L1 doesn't intercept it */
-       h_intercept_exceptions =
-               h->intercept_exceptions & ~(1U << UD_VECTOR);
-
        c->intercept_cr = h->intercept_cr | g->intercept_cr;
        c->intercept_dr = h->intercept_dr | g->intercept_dr;
-       c->intercept_exceptions =
-               h_intercept_exceptions | g->intercept_exceptions;
+       c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
 }
 
@@ -2202,7 +2196,6 @@ static int ud_interception(struct vcpu_svm *svm)
 {
        int er;
 
-       WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
        er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
        if (er == EMULATE_USER_EXIT)
                return 0;
index 5c14d65f676a99028b21108e272b9aa44bfe351c..3f89f6783aa57d1d20f5f45b025a61bc9cb8465a 100644 (file)
@@ -899,8 +899,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
 {
        BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
 
-       if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
-           vmcs_field_to_offset_table[field] == 0)
+       if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
+               return -ENOENT;
+
+       /*
+        * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
+        * generic mechanism.
+        */
+       asm("lfence");
+
+       if (vmcs_field_to_offset_table[field] == 0)
                return -ENOENT;
 
        return vmcs_field_to_offset_table[field];
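
The FIXME above is an ad-hoc mitigation for Spectre variant 1 (bounds-check bypass): the lfence keeps the table load from executing speculatively with an out-of-bounds index. A hedged generic sketch of the same pattern, with hypothetical table/TABLE_SIZE names (the generic mechanism the FIXME anticipates later arrived as array_index_nospec()):

       #define TABLE_SIZE 128                 /* assumed */
       extern const short table[TABLE_SIZE];  /* assumed */

       static short table_lookup(unsigned long field)
       {
               if (field >= TABLE_SIZE)
                       return -1;      /* out of range */

               /* lfence serializes execution: the load below cannot run
                * speculatively with field out of bounds. */
               asm volatile("lfence" ::: "memory");

               return table[field];
       }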
@@ -1887,7 +1895,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        u32 eb;
 
-       eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
+       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
             (1u << DB_VECTOR) | (1u << AC_VECTOR);
        if ((vcpu->guest_debug &
             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,8 +1913,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         */
        if (is_guest_mode(vcpu))
                eb |= get_vmcs12(vcpu)->exception_bitmap;
-       else
-               eb |= 1u << UD_VECTOR;
 
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -5917,7 +5923,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_invalid_opcode(intr_info)) {
-               WARN_ON_ONCE(is_guest_mode(vcpu));
                er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
                if (er == EMULATE_USER_EXIT)
                        return 0;
index 4d62c071b166f65c848a12ca07bfe44ca20e198a..d85076223a696d0b00bee7c22a9e28b1e66dc975 100644 (file)
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
        struct {
                struct mmuext_op op;
-#ifdef CONFIG_SMP
-               DECLARE_BITMAP(mask, num_processors);
-#else
                DECLARE_BITMAP(mask, NR_CPUS);
-#endif
        } *args;
        struct multicall_space mcs;
+       const size_t mc_entry_size = sizeof(args->op) +
+               sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
 
        trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
        if (cpumask_empty(cpus))
                return;         /* nothing to do */
 
-       mcs = xen_mc_entry(sizeof(*args));
+       mcs = xen_mc_entry(mc_entry_size);
        args = mcs.args;
        args->op.arg2.vcpumask = to_cpumask(args->mask);
 
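
The new mc_entry_size arithmetic sizes the multicall entry for the CPUs that can actually exist instead of the compile-time NR_CPUS. A worked example with assumed figures — 64-bit longs, num_possible_cpus() == 96, NR_CPUS == 8192:

       /* BITS_TO_LONGS(n) == DIV_ROUND_UP(n, BITS_PER_LONG), so the mask
        * shrinks from the worst case to just what is needed. */
       const size_t mask_bytes = sizeof(long) * BITS_TO_LONGS(96);  /* 2 longs == 16 bytes */
       const size_t worst_case = 8192 / 8;                          /* 1024 bytes */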
index 75011b80660f6262206b6759e7b63a06a1d809ea..3b34745d0a52dd097d9c2d8955bd4afce1d24ee1 100644 (file)
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void xen_save_time_memory_area(void);
 void xen_restore_time_memory_area(void);
-void __init xen_init_time_ops(void);
+void __ref xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);
 
 irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
index b8881750a3acd705789050c845c942673da999a8..3ba4326a63b59632fad81e686bf8229eee07320b 100644 (file)
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
        }
 }
 
+void blk_drain_queue(struct request_queue *q)
+{
+       spin_lock_irq(q->queue_lock);
+       __blk_drain_queue(q, true);
+       spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
         */
        blk_freeze_queue(q);
        spin_lock_irq(lock);
-       if (!q->mq_ops)
-               __blk_drain_queue(q, true);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
index 11097477eeab6591088ca817d4690535e114e699..3d379732749175ece7ae39e427e115f51621c521 100644 (file)
@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
+       if (!q->mq_ops)
+               blk_drain_queue(q);
        blk_mq_freeze_queue_wait(q);
 }
 
index 3f1446937aece26f38ceb66cf4e3d159a23df871..442098aa9463a37dad0dfccb1718eea65be6cdb3 100644 (file)
@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_BOUNCE */
 
+extern void blk_drain_queue(struct request_queue *q);
+
 #endif /* BLK_INTERNAL_H */
index 60d7366ed343e9ad6a76b9dd36be22fbd7543c89..9a636f961572b99af844b1d8a28bebfd9463c9e2 100644 (file)
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 
                        spawn->alg = NULL;
                        spawns = &inst->alg.cra_users;
+
+                       /*
+                        * We may encounter an unregistered instance here, since
+                        * an instance's spawns are set up prior to the instance
+                        * being registered.  An unregistered instance will have
+                        * NULL ->cra_users.next, since ->cra_users isn't
+                        * properly initialized until registration.  But an
+                        * unregistered instance cannot have any users, so treat
+                        * it the same as ->cra_users being empty.
+                        */
+                       if (spawns->next == NULL)
+                               break;
                }
        } while ((spawns = crypto_more_spawns(alg, &stack, &top,
                                              &secondary_spawns)));
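
The new check leans on a zeroed-versus-initialized list_head distinction. A hedged illustration — a never-registered instance comes out of zeroed memory, so its list pointers are NULL, while an initialized empty list points at itself:

       #include <linux/list.h>

       struct list_head zeroed = { NULL, NULL };  /* as from kzalloc() */
       LIST_HEAD(empty);                          /* initialized: empty.next == &empty */

       /* So spawns->next == NULL above means "instance never registered",
        * which also guarantees it has no users. */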
index bc8e61506968a0c3da43f2e4396f0cc9550b0d51..d5fe720cf14940b668f8764de2bad6cf95549528 100644 (file)
@@ -1581,9 +1581,8 @@ out:
        return err;
 }
 
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-       struct loop_device *lo = disk->private_data;
        int err;
 
        if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
        mutex_unlock(&lo->lo_ctl_mutex);
 }
 
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+       mutex_lock(&loop_index_mutex);
+       __lo_release(disk->private_data);
+       mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
        .owner =        THIS_MODULE,
        .open =         lo_open,
index 38fc5f397fdede04ebaf4f3f9c2e89118ee6b235..cc93522a6d419dc77902bab085306c2b316f6c16 100644 (file)
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
        mutex_unlock(&rbd_dev->watch_mutex);
 }
 
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+       struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+       strcpy(rbd_dev->lock_cookie, cookie);
+       rbd_set_owner_cid(rbd_dev, &cid);
+       queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-       struct rbd_client_id cid = rbd_get_cid(rbd_dev);
        char cookie[32];
        int ret;
 
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
                return ret;
 
        rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-       strcpy(rbd_dev->lock_cookie, cookie);
-       rbd_set_owner_cid(rbd_dev, &cid);
-       queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+       __rbd_lock(rbd_dev, cookie);
        return 0;
 }
 
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
                        queue_delayed_work(rbd_dev->task_wq,
                                           &rbd_dev->lock_dwork, 0);
        } else {
-               strcpy(rbd_dev->lock_cookie, cookie);
+               __rbd_lock(rbd_dev, cookie);
        }
 }
 
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        segment_size = rbd_obj_bytes(&rbd_dev->header);
        blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
        q->limits.max_sectors = queue_max_hw_sectors(q);
-       blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_segments(q, USHRT_MAX);
        blk_queue_max_segment_size(q, segment_size);
        blk_queue_io_min(q, segment_size);
        blk_queue_io_opt(q, segment_size);
index 44332b793718afe1092f5c1a1f42a088b52ab6fb..14532d9576e4239ff60a193bec13a1a819c338b8 100644 (file)
@@ -2892,6 +2892,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 }
 EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 
+/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+       if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+               value = !value;
+       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+               gpio_set_open_drain_value_commit(desc, value);
+       else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+               gpio_set_open_source_value_commit(desc, value);
+       else
+               gpiod_set_raw_value_commit(desc, value);
+}
+
 /**
  * gpiod_set_value() - assign a gpio's value
  * @desc: gpio whose value will be assigned
@@ -2906,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        VALIDATE_DESC_VOID(desc);
-       /* Should be using gpiod_set_value_cansleep() */
        WARN_ON(desc->gdev->chip->can_sleep);
-       if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-               value = !value;
-       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-               gpio_set_open_drain_value_commit(desc, value);
-       else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-               gpio_set_open_source_value_commit(desc, value);
-       else
-               gpiod_set_raw_value_commit(desc, value);
+       gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value);
 
@@ -3243,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
        might_sleep_if(extra_checks);
        VALIDATE_DESC_VOID(desc);
-       if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-               value = !value;
-       gpiod_set_raw_value_commit(desc, value);
+       gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
 
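
A hedged consumer-side sketch of what the shared gpiod_set_value_nocheck() helper buys: callers pass the logical level and gpiolib applies the active-low and open-drain/open-source quirks in both the atomic and sleeping paths. The "led" con_id is hypothetical, and dev is assumed to be the consumer's struct device:

       #include <linux/gpio/consumer.h>

       struct gpio_desc *led = gpiod_get(dev, "led", GPIOD_OUT_LOW);

       gpiod_set_value(led, 1);           /* logical "on", even if wired active-low */
       gpiod_set_value_cansleep(led, 0);  /* same semantics where sleeping is allowed */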
index 85d4c57870fb7a2c577803d12e3c0bf219cf056f..49af94627c8a1e1f3446dad2ebb625075112ba07 100644 (file)
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
-               unsigned int opcode, int rings)
+               unsigned int opcode, unsigned long rings)
 {
        struct cmd_info *info = NULL;
        unsigned int ring;
 
-       for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+       for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
                info = find_cmd_entry(gvt, opcode, ring);
                if (info)
                        break;
index 8e331142badbcbad4ceebbd0c0b2e2fa2fb8584c..64d67ff9bf084a3c02e3c2f808ed9af066208f97 100644 (file)
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
                        return ret;
        } else {
                if (!test_bit(index, spt->post_shadow_bitmap)) {
+                       int type = spt->shadow_page.type;
+
                        ppgtt_get_shadow_entry(spt, &se, index);
                        ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
                        if (ret)
                                return ret;
+                       ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+                       ppgtt_set_shadow_entry(spt, &se, index);
                }
-
                ppgtt_set_post_shadow(spt, index);
        }
 
index 18de6569d04aef46aad4af88edb20f0a94d91b53..5cfba89ed586391409cf02459344b668c9c181bc 100644 (file)
@@ -467,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
        struct drm_i915_gem_request *rq;
        struct intel_engine_cs *engine;
 
-       if (!dma_fence_is_i915(fence))
+       if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
                return;
 
        rq = to_request(fence);
index 333f40bc03bb052adddb75c29e48305a51e421e1..7923dfd9963c6e40b0e7e84d8a9d9bf0cca87a1f 100644 (file)
@@ -7027,6 +7027,8 @@ enum {
 #define GEN9_SLICE_COMMON_ECO_CHICKEN0         _MMIO(0x7308)
 #define  DISABLE_PIXEL_MASK_CAMMING            (1<<14)
 
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1         _MMIO(0x731c)
+
 #define GEN7_L3SQCREG1                         _MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
 
index ab5bf4e2e28e21f3daa7072a37cb6895aba8233c..6074e04dc99fca3d269d78ac2aa4e9159e78aedb 100644 (file)
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
+       /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+       ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+       if (ret)
+               return ret;
+
        /* WaToEnableHwFixForPushConstHWBug:glk */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
index d36e2560743545cef7b32b24adb794e719ca5da2..e71a8cd50498c338aa0e0f3c03c2b62e0e8823b8 100644 (file)
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
+       if (i915_gem_request_completed(request))
+               return;
+
        if (prio <= READ_ONCE(request->priotree.priority))
                return;
 
index a2978a37b4f3c72ef4e6f03b1fb7f2c0c7500478..700fc754f28a4c32a001c243822b5e97733ba0a7 100644 (file)
@@ -174,6 +174,7 @@ gf119_sor = {
                .links = gf119_sor_dp_links,
                .power = g94_sor_dp_power,
                .pattern = gf119_sor_dp_pattern,
+               .drive = gf119_sor_dp_drive,
                .vcpi = gf119_sor_dp_vcpi,
                .audio = gf119_sor_dp_audio,
                .audio_sym = gf119_sor_dp_audio_sym,
index b0a1dedac8026e0ad89ee1227cb7b7881d3fee7d..476079f1255f6c51e5cba11ff6f05f1c315a691e 100644 (file)
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
                                name, err);
                        goto remove;
                }
+       } else {
+               /* fall back to the module clock on SOR0 (eDP/LVDS only) */
+               sor->clk_out = sor->clk;
        }
 
        sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
index 26eddbb628936b91f20a000c405bfbc536324e89..3dd62d75f5319dbffcd9d27ea6f60927d193eb92 100644 (file)
@@ -209,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-       /* Undo the effects of a previous vc4_irq_uninstall. */
-       enable_irq(dev->irq);
-
        /* Enable both the render done and out of memory interrupts. */
        V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
 
index 622cd43840b8c53588396b6d1d82b5d234a69113..493f392b3a0a90e70820d3582c0847ebe3cc0bd7 100644 (file)
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
                return ret;
 
        vc4_v3d_init_hw(vc4->dev);
+
+       /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
+       enable_irq(vc4->dev->irq);
        vc4_irq_postinstall(vc4->dev);
 
        return 0;
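
The vc4 hunks above are an enable/disable balancing fix: enable_irq() and disable_irq() nest, so the re-enable belongs on the runtime-resume path that pairs with the disable done via vc4_irq_uninstall() in suspend, not in vc4_irq_postinstall(), which also runs on first load when nothing was disabled. Below is a minimal user-space C sketch of that nesting contract; the depth counter and helper names are illustrative stand-ins, not the kernel's IRQ core.

/* Hedged sketch: enable_irq()/disable_irq() are refcounted and must pair.
 * The depth counter models the per-IRQ disable depth; suspend() and
 * resume() pair up, while postinstall() no longer touches the depth.
 */
#include <assert.h>
#include <stdio.h>

static int irq_depth;                   /* 0 == interrupt line enabled */

static void disable_irq(void) { irq_depth++; }
static void enable_irq(void)
{
        assert(irq_depth > 0);          /* an unbalanced enable trips here */
        irq_depth--;
}

static void suspend(void)     { disable_irq(); }
static void resume(void)      { enable_irq(); /* pairs with suspend() */ }
static void postinstall(void) { /* enables device interrupt sources only */ }

int main(void)
{
        postinstall();                  /* first load: depth untouched */
        suspend();
        resume();
        postinstall();                  /* reinstall after resume */
        printf("depth after resume: %d\n", irq_depth);  /* prints 0 */
        return 0;
}

With the removed enable_irq() still in postinstall(), the very first install would try to decrement a depth of zero, which is exactly the imbalance the patch eliminates.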
index 21c62a34e5580af7e56505a64bbae48707ae2599..87e8af5776a389166278e24c6087a49f03682dbb 100644 (file)
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
        }
 
        view_type = vmw_view_cmd_to_type(header->id);
+       if (view_type == vmw_view_max)
+               return -EINVAL;
        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
index 0545740b3724f1d6c28b9e1dd169351fdd723949..641294aef1652e1d8599016090aacd4bb33364b0 100644 (file)
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
        vps->pinned = 0;
 
        /* Mapping is managed by prepare_fb/cleanup_fb */
-       memset(&vps->guest_map, 0, sizeof(vps->guest_map));
        memset(&vps->host_map, 0, sizeof(vps->host_map));
        vps->cpp = 0;
 
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
 
 
        /* Should have been freed by cleanup_fb */
-       if (vps->guest_map.virtual) {
-               DRM_ERROR("Guest mapping not freed\n");
-               ttm_bo_kunmap(&vps->guest_map);
-       }
-
        if (vps->host_map.virtual) {
                DRM_ERROR("Host mapping not freed\n");
                ttm_bo_kunmap(&vps->host_map);
index ff9c8389ff21c3b2923c8387455976d5425e33f3..cd9da2dd79af1a062d4aaa3d6b6ded468bd4207e 100644 (file)
@@ -175,7 +175,7 @@ struct vmw_plane_state {
        int pinned;
 
        /* For CPU Blit */
-       struct ttm_bo_kmap_obj host_map, guest_map;
+       struct ttm_bo_kmap_obj host_map;
        unsigned int cpp;
 };
 
index 90b5437fd787e0de7d360b5b697fb33d4a8551c3..b68d74888ab1100be82f8a2a9fdc3234a4e04293 100644 (file)
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
        bool defined;
 
        /* For CPU Blit */
-       struct ttm_bo_kmap_obj host_map, guest_map;
+       struct ttm_bo_kmap_obj host_map;
        unsigned int cpp;
 };
 
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        s32 src_pitch, dst_pitch;
        u8 *src, *dst;
        bool not_used;
-
+       struct ttm_bo_kmap_obj guest_map;
+       int ret;
 
        if (!dirty->num_hits)
                return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        if (width == 0 || height == 0)
                return;
 
+       ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
+                         &guest_map);
+       if (ret) {
+               DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
+                         ret);
+               goto out_cleanup;
+       }
 
        /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
        src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
        dst_pitch = ddirty->pitch;
-       dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
+       dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
        dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }
 
+       ttm_bo_kunmap(&guest_map);
 out_cleanup:
        ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
        ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
 {
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 
-       if (vps->guest_map.virtual)
-               ttm_bo_kunmap(&vps->guest_map);
-
        if (vps->host_map.virtual)
                ttm_bo_kunmap(&vps->host_map);
 
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
         */
        if (vps->content_fb_type == SEPARATE_DMA &&
            !(dev_priv->capabilities & SVGA_CAP_3D)) {
-
-               struct vmw_framebuffer_dmabuf *new_vfbd;
-
-               new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
-
-               ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
-                                    NULL);
-               if (ret)
-                       goto out_srf_unpin;
-
-               ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
-                                 new_vfbd->buffer->base.num_pages,
-                                 &vps->guest_map);
-
-               ttm_bo_unreserve(&new_vfbd->buffer->base);
-
-               if (ret) {
-                       DRM_ERROR("Failed to map content buffer to CPU\n");
-                       goto out_srf_unpin;
-               }
-
                ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
                                  vps->surf->res.backup->base.num_pages,
                                  &vps->host_map);
                if (ret) {
                        DRM_ERROR("Failed to map display buffer to CPU\n");
-                       ttm_bo_kunmap(&vps->guest_map);
                        goto out_srf_unpin;
                }
 
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
        stdu->display_srf = vps->surf;
        stdu->content_fb_type = vps->content_fb_type;
        stdu->cpp = vps->cpp;
-       memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
        memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
 
        if (!stdu->defined)
index a1d687a664f85edc4eeb1bf50360cb02c65faa45..66f0268f37a6ca143f558a6c02080c1b702afcc3 100644 (file)
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 }
 #endif
 
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
index 30914f3baa5f1ee5b43695c96f56c54b8ce2a2df..465520627e4b6b93c85aae4c276be44607eb7400 100644 (file)
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
        return 0;
 }
 
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
 {
        struct ib_device *device;
 
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
        return NULL;
 }
 
+/*
+ * Caller is responsible for releasing the reference by calling put_device()
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+       struct ib_device *device;
+
+       down_read(&lists_rwsem);
+       device = __ib_device_get_by_index(index);
+       if (device)
+               get_device(&device->dev);
+
+       up_read(&lists_rwsem);
+       return device;
+}
+
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
        struct ib_device *device;
index 9a05245a1acf4a7fd010fb7f038e84d7bbf5003e..0dcd1aa6f683e8bc73901ecb4fee08c8a167801e 100644 (file)
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
 
-       device = __ib_device_get_by_index(index);
+       device = ib_device_get_by_index(index);
        if (!device)
                return -EINVAL;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
+       if (!msg) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);
 
        err = fill_dev_info(msg, device);
-       if (err) {
-               nlmsg_free(msg);
-               return err;
-       }
+       if (err)
+               goto err_free;
 
        nlmsg_end(msg, nlh);
 
+       put_device(&device->dev);
        return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+       nlmsg_free(msg);
+err:
+       put_device(&device->dev);
+       return err;
 }
 
 static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                return -EINVAL;
 
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = __ib_device_get_by_index(index);
+       device = ib_device_get_by_index(index);
        if (!device)
                return -EINVAL;
 
        port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
-       if (!rdma_is_port_valid(device, port))
-               return -EINVAL;
+       if (!rdma_is_port_valid(device, port)) {
+               err = -EINVAL;
+               goto err;
+       }
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
+       if (!msg) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);
 
        err = fill_port_info(msg, device, port);
-       if (err) {
-               nlmsg_free(msg);
-               return err;
-       }
+       if (err)
+               goto err_free;
 
        nlmsg_end(msg, nlh);
+       put_device(&device->dev);
 
        return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+       nlmsg_free(msg);
+err:
+       put_device(&device->dev);
+       return err;
 }
 
 static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
                return -EINVAL;
 
        ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = __ib_device_get_by_index(ifindex);
+       device = ib_device_get_by_index(ifindex);
        if (!device)
                return -EINVAL;
 
@@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
                nlmsg_end(skb, nlh);
        }
 
-out:   cb->args[0] = idx;
+out:
+       put_device(&device->dev);
+       cb->args[0] = idx;
        return skb->len;
 }
 
index 313bfb9ccb71a3b42ebacbaa9b98b91daca51a19..4975f3e6596e49aaa8586ad9a13da719ef4dd6ab 100644 (file)
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                goto err_free_mr;
 
        mr->max_pages = max_num_sg;
-
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
        return &mr->ibmr;
 
 err_free_pl:
+       mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
 err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
index 12b7f911f0e5b9b9fce4f034bde66db4fcc26e81..8880351df179382fa6d64b4b6e698c2fdb563e94 100644 (file)
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
        return 0;
 }
 
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
-                          struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+                                         struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                spin_unlock_irqrestore(&priv->lock, flags);
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
-               return;
+               return NULL;
+       }
+
+       /* To avoid a race condition, make sure that the
+        * neigh is added only once.
+        */
+       if (unlikely(!list_empty(&neigh->list))) {
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return neigh;
        }
 
        path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                        path->ah->last_send = rn->send(dev, skb, path->ah->ah,
                                                       IPOIB_QPN(daddr));
                        ipoib_neigh_put(neigh);
-                       return;
+                       return NULL;
                }
        } else {
                neigh->ah  = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 
        spin_unlock_irqrestore(&priv->lock, flags);
        ipoib_neigh_put(neigh);
-       return;
+       return NULL;
 
 err_path:
        ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
 
        spin_unlock_irqrestore(&priv->lock, flags);
        ipoib_neigh_put(neigh);
+
+       return NULL;
 }
 
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
        case htons(ETH_P_TIPC):
                neigh = ipoib_neigh_get(dev, phdr->hwaddr);
                if (unlikely(!neigh)) {
-                       neigh_add_path(skb, phdr->hwaddr, dev);
-                       return NETDEV_TX_OK;
+                       neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+                       if (likely(!neigh))
+                               return NETDEV_TX_OK;
                }
                break;
        case htons(ETH_P_ARP):
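
The neigh_add_path() change above closes a window in which two CPUs could both look up the same neigh and link it into a path list twice; the fix re-checks list_empty(&neigh->list) under priv->lock before inserting. Below is a hedged user-space sketch of the same check-under-lock idiom, with a hand-rolled circular list standing in for the kernel's list_head; all names are illustrative.

/* Sketch: insert a node only if it is not already linked, decided under
 * the same lock that protects the list. node_unlinked() plays the role of
 * list_empty(&neigh->list) in the ipoib fix.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

static struct node head = { &head, &head };     /* empty circular list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool node_unlinked(const struct node *n) { return n->next == n; }
static void node_init(struct node *n) { n->next = n->prev = n; }

static void list_add_tail(struct node *n)
{
        n->prev = head.prev;
        n->next = &head;
        head.prev->next = n;
        head.prev = n;
}

/* Two racing callers try to publish the same node; only the one that sees
 * it unlinked under the lock actually inserts it.
 */
static void *publish(void *arg)
{
        struct node *n = arg;

        pthread_mutex_lock(&lock);
        if (node_unlinked(n))           /* the re-check added by the fix */
                list_add_tail(n);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        struct node n;
        pthread_t a, b;

        node_init(&n);
        pthread_create(&a, NULL, publish, &n);
        pthread_create(&b, NULL, publish, &n);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("linked exactly once: %s\n",
               head.next == &n && n.next == &head ? "yes" : "no");
        return 0;
}

Whichever thread takes the lock second sees the node already linked and backs off, so the list never holds the entry twice. The mcast hunk below applies the same list_empty() guard before adding a neigh to the multicast list.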
index 93e149efc1f5fc0382b61dcfc9f84d786d8b52ca..9b3f47ae201603e8bc4e1527a0b3546a0de69878 100644 (file)
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                spin_lock_irqsave(&priv->lock, flags);
                if (!neigh) {
                        neigh = ipoib_neigh_alloc(daddr, dev);
-                       if (neigh) {
+                       /* Make sure that the neigh is added to the
+                        * mcast list only once.
+                        */
+                       if (neigh && list_empty(&neigh->list)) {
                                kref_get(&mcast->ah->ref);
                                neigh->ah       = mcast->ah;
                                list_add_tail(&neigh->list, &mcast->neigh_list);
index 8a1bd354b1cc1cb195d77be4d545a11c1bc93ac4..bfa576aa9f03c75727b9b61db67d47bfc8846797 100644 (file)
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
                return -ENOMEM;
 
        attr->qp_state = IB_QPS_INIT;
-       attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
-           IB_ACCESS_REMOTE_WRITE;
+       attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        attr->port_num = ch->sport->port;
        attr->pkey_index = 0;
 
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                goto destroy_ib;
        }
 
-       guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
+       guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
        snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
                 be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
                 be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
index fcf7235d5742ae443bdc629d293983db5b60312d..157e1d9e7725a050f64c32ebed135b46ae0ee58d 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
index f7f157a62a4a7dfa7155b5fe9933ead9f870ad1b..555c7f133eb8a5246ff3534d1899927463e99dc2 100644 (file)
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
 struct s3cmci_reg {
        unsigned short  addr;
        unsigned char   *name;
-} debug_regs[] = {
+};
+
+static const struct s3cmci_reg debug_regs[] = {
        DBG_REG(CON),
        DBG_REG(PRE),
        DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
 static int s3cmci_regs_show(struct seq_file *seq, void *v)
 {
        struct s3cmci_host *host = seq->private;
-       struct s3cmci_reg *rptr = debug_regs;
+       const struct s3cmci_reg *rptr = debug_regs;
 
        for (; rptr->name; rptr++)
                seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
index 0626dcfd1f3d83ceaad91968cffd65370189cfac..760d2c07e3a2c1edd15a2f70d20d1272adb3fd8a 100644 (file)
@@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
                data = be32_to_cpup((__be32 *)&cf->data[0]);
                flexcan_write(data, &priv->tx_mb->data[0]);
        }
-       if (cf->can_dlc > 3) {
+       if (cf->can_dlc > 4) {
                data = be32_to_cpup((__be32 *)&cf->data[4]);
                flexcan_write(data, &priv->tx_mb->data[1]);
        }
index b00358297424604634489a9b106bf313c06b7fc9..12ff0020ecd60904b65b89a633cd2a60bf880ed2 100644 (file)
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
 
                if (dev->can.state == CAN_STATE_ERROR_WARNING ||
                    dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+                       cf->can_id |= CAN_ERR_CRTL;
                        cf->data[1] = (txerr > rxerr) ?
                            CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
                }
index 68ac3e88a8cecbe5b4a58da8491756ad5c26039a..8bf80ad9dc44cf4622811bc1fab31404c50cfb95 100644 (file)
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
                dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
                        rc);
 
-       return rc;
+       return (rc > 0) ? 0 : rc;
 }
 
 static void gs_usb_xmit_callback(struct urb *urb)
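
The gs_usb change above is a return-convention fix: usb_control_msg() returns the number of bytes transferred on success, while callers of gs_usb_set_bittiming() expect zero or a negative errno, so a positive byte count has to be collapsed to 0. A small self-contained sketch of the normalization; fake_control_msg() is a stand-in, not the real USB API.

/* Sketch: a transport call reports "bytes transferred or negative errno",
 * but the caller's contract is "0 on success or negative errno".
 */
#include <errno.h>
#include <stdio.h>

static int fake_control_msg(int fail)
{
        return fail ? -EIO : 16;        /* 16 bytes written on success */
}

static int set_bittiming(int fail)
{
        int rc = fake_control_msg(fail);

        if (rc < 0)
                fprintf(stderr, "couldn't set bittimings (err=%d)\n", rc);

        return (rc > 0) ? 0 : rc;       /* success: 0, failure: -errno */
}

int main(void)
{
        printf("ok path:   %d\n", set_bittiming(0));    /* prints 0 */
        printf("fail path: %d\n", set_bittiming(1));    /* prints -5 */
        return 0;
}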
index 8404e8852a0f96df80d7cb3d1059e64890405949..b4c4a2c764378e17131234f6b9481d76be2db786 100644 (file)
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
                tbp = peer_tb;
        }
 
-       if (tbp[IFLA_IFNAME]) {
+       if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
index f5a8dd96fd75f273f6a31647087bd0c5e0b4fd47..4498ab897d94b9ea86f5fb83fd5eda98bd88e263 100644 (file)
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
 {
        struct b53_device *dev = ds->priv;
 
-       /* Older models support a different tag format that we do not
-        * support in net/dsa/tag_brcm.c yet.
+       /* Older models (5325, 5365) support a different tag format that we do
+        * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
+        * mode to be turned on, which means we need to specifically manage ARL
+        * misses on multicast addresses (TBD).
         */
-       if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+       if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
+           !b53_can_enable_brcm_tags(ds, port))
                return DSA_TAG_PROTO_NONE;
 
        /* Broadcom BCM58xx chips have a flow accelerator on Port 8
index f4e13a7014bde0a1cd4f60b4b60a5a1b0459d09e..36c8950dbd2d80699f396f217f0f438479f68355 100644 (file)
@@ -602,7 +602,7 @@ struct vortex_private {
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        unsigned int cur_rx, cur_tx;            /* The next free ring entry */
-       unsigned int dirty_rx, dirty_tx;        /* The ring entries to be free()ed. */
+       unsigned int dirty_tx;  /* The ring entries to be free()ed. */
        struct vortex_extra_stats xstats;       /* NIC-specific extra stats */
        struct sk_buff *tx_skb;                         /* Packet being eaten by bus master ctrl.  */
        dma_addr_t tx_skb_dma;                          /* Allocated DMA address for bus master ctrl DMA.   */
@@ -618,7 +618,6 @@ struct vortex_private {
 
        /* The remainder are related to chip state, mostly media selection. */
        struct timer_list timer;                        /* Media selection timer. */
-       struct timer_list rx_oom_timer;         /* Rx skb allocation retry timer */
        int options;                                            /* User-settable misc. driver options. */
        unsigned int media_override:4,          /* Passed-in media type. */
                default_media:4,                                /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
 static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
 static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev);
 static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
 
        timer_setup(&vp->timer, vortex_timer, 0);
        mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
-       timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
 
        if (vortex_debug > 1)
                pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
        window_write16(vp, 0x0040, 4, Wn4_NetDiag);
 
        if (vp->full_bus_master_rx) { /* Boomerang bus master. */
-               vp->cur_rx = vp->dirty_rx = 0;
+               vp->cur_rx = 0;
                /* Initialize the RxEarly register as recommended. */
                iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
                iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
        struct vortex_private *vp = netdev_priv(dev);
        int i;
        int retval;
+       dma_addr_t dma;
 
        /* Use the now-standard shared IRQ implementation. */
        if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
                                break;                  /* Bad news!  */
 
                        skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
-                       vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+                       dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+                                            PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+                               break;
+                       vp->rx_ring[i].addr = cpu_to_le32(dma);
                }
                if (i != RX_RING_SIZE) {
                        pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
                int len = (skb->len + 3) & ~3;
                vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
                                                PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+                       dev_kfree_skb_any(skb);
+                       dev->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+
                spin_lock_irq(&vp->window_lock);
                window_set(vp, 7);
                iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
        int entry = vp->cur_rx % RX_RING_SIZE;
        void __iomem *ioaddr = vp->ioaddr;
        int rx_status;
-       int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+       int rx_work_limit = RX_RING_SIZE;
 
        if (vortex_debug > 5)
                pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
                } else {
                        /* The packet length: up to 4.5K!. */
                        int pkt_len = rx_status & 0x1fff;
-                       struct sk_buff *skb;
+                       struct sk_buff *skb, *newskb;
+                       dma_addr_t newdma;
                        dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
 
                        if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
                                pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                vp->rx_copy++;
                        } else {
+                               /* Pre-allocate the replacement skb.  If it or its
+                                * mapping fails then recycle the buffer that's already
+                                * in place.
+                                */
+                               newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+                               if (!newskb) {
+                                       dev->stats.rx_dropped++;
+                                       goto clear_complete;
+                               }
+                               newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+                                                       PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+                               if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+                                       dev->stats.rx_dropped++;
+                                       consume_skb(newskb);
+                                       goto clear_complete;
+                               }
+
                                /* Pass up the skbuff already on the Rx ring. */
                                skb = vp->rx_skbuff[entry];
-                               vp->rx_skbuff[entry] = NULL;
+                               vp->rx_skbuff[entry] = newskb;
+                               vp->rx_ring[entry].addr = cpu_to_le32(newdma);
                                skb_put(skb, pkt_len);
                                pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
                        netif_rx(skb);
                        dev->stats.rx_packets++;
                }
-               entry = (++vp->cur_rx) % RX_RING_SIZE;
-       }
-       /* Refill the Rx ring buffers. */
-       for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
-               struct sk_buff *skb;
-               entry = vp->dirty_rx % RX_RING_SIZE;
-               if (vp->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
-                       if (skb == NULL) {
-                               static unsigned long last_jif;
-                               if (time_after(jiffies, last_jif + 10 * HZ)) {
-                                       pr_warn("%s: memory shortage\n",
-                                               dev->name);
-                                       last_jif = jiffies;
-                               }
-                               if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
-                                       mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
-                               break;                  /* Bad news!  */
-                       }
 
-                       vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
-                       vp->rx_skbuff[entry] = skb;
-               }
+clear_complete:
                vp->rx_ring[entry].status = 0;  /* Clear complete bit. */
                iowrite16(UpUnstall, ioaddr + EL3_CMD);
+               entry = (++vp->cur_rx) % RX_RING_SIZE;
        }
        return 0;
 }
 
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory.  Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
-       struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
-       struct net_device *dev = vp->mii.dev;
-
-       spin_lock_irq(&vp->lock);
-       if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)        /* This test is redundant, but makes me feel good */
-               boomerang_rx(dev);
-       if (vortex_debug > 1) {
-               pr_debug("%s: rx_oom_timer %s\n", dev->name,
-                       ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
-       }
-       spin_unlock_irq(&vp->lock);
-}
-
 static void
 vortex_down(struct net_device *dev, int final_down)
 {
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
        netdev_reset_queue(dev);
        netif_stop_queue(dev);
 
-       del_timer_sync(&vp->rx_oom_timer);
        del_timer_sync(&vp->timer);
 
        /* Turn off statistics ASAP.  We update dev->stats below. */
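
The boomerang_rx() rewrite above replaces deferred ring refill (and the one-second OOM retry timer) with a "replace before consume" scheme: a new buffer is allocated and DMA-mapped first, and only when both steps succeed is the ring slot swapped and the old buffer handed to the stack; on failure the old buffer is recycled in place and the frame is counted as dropped, so the ring can never end up with empty slots. Below is a hedged user-space sketch of the pattern, with malloc() standing in for skb allocation plus DMA mapping.

/* Sketch: the rx slot is only updated once a replacement buffer exists;
 * otherwise the old buffer stays in the ring and the frame is dropped.
 */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4
#define BUF_SZ    1536

static char *ring[RING_SIZE];
static unsigned long rx_packets, rx_dropped;

static void deliver(char *buf) { free(buf); rx_packets++; }  /* netif_rx() stand-in */

static void rx_one(int entry, int alloc_fails)
{
        char *newbuf = alloc_fails ? NULL : malloc(BUF_SZ);

        if (!newbuf) {
                /* Recycle: the slot keeps its buffer, the frame is lost. */
                rx_dropped++;
                return;
        }
        /* Swap first, then pass the filled buffer up the stack. */
        char *full = ring[entry];
        ring[entry] = newbuf;
        deliver(full);
}

int main(void)
{
        for (int i = 0; i < RING_SIZE; i++)
                ring[i] = malloc(BUF_SZ);

        rx_one(0, 0);   /* normal receive */
        rx_one(1, 1);   /* allocation failure: slot 1 keeps its buffer */

        printf("packets=%lu dropped=%lu, slot 1 populated: %s\n",
               rx_packets, rx_dropped, ring[1] ? "yes" : "no");

        for (int i = 0; i < RING_SIZE; i++)
                free(ring[i]);
        return 0;
}

Because every slot always owns a buffer, there is nothing left for the removed rx_oom_timer to refill, which is why the timer and the dirty_rx bookkeeping go away with it.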
index 97c5a89a9cf7a4f1a65dbef70d889efd7d661c20..fbe21a817bd8a2c9ca01a2be6e8c6948b704ef87 100644 (file)
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
+static void check_for_admin_com_state(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter);
+static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
 {
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
 
 static int ena_up_complete(struct ena_adapter *adapter)
 {
-       int rc, i;
+       int rc;
 
        rc = ena_rss_configure(adapter);
        if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
        ena_napi_enable_all(adapter);
 
-       /* Enable completion queues interrupt */
-       for (i = 0; i < adapter->num_queues; i++)
-               ena_unmask_interrupt(&adapter->tx_ring[i],
-                                    &adapter->rx_ring[i]);
-
-       /* schedule napi in case we had pending packets
-        * from the last time we disable napi
-        */
-       for (i = 0; i < adapter->num_queues; i++)
-               napi_schedule(&adapter->ena_napi[i].napi);
-
        return 0;
 }
 
@@ -1731,7 +1723,7 @@ create_err:
 
 static int ena_up(struct ena_adapter *adapter)
 {
-       int rc;
+       int rc, i;
 
        netdev_dbg(adapter->netdev, "%s\n", __func__);
 
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
 
        set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 
+       /* Enable completion queue interrupts */
+       for (i = 0; i < adapter->num_queues; i++)
+               ena_unmask_interrupt(&adapter->tx_ring[i],
+                                    &adapter->rx_ring[i]);
+
+       /* Schedule napi in case we had pending packets
+        * from the last time we disabled napi.
+        */
+       for (i = 0; i < adapter->num_queues; i++)
+               napi_schedule(&adapter->ena_napi[i].napi);
+
        return rc;
 
 err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
 
+       /* Check the device status and issue a reset if needed */
+       check_for_admin_com_state(adapter);
+       if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+               netif_err(adapter, ifdown, adapter->netdev,
+                         "Destroy failure, restarting device\n");
+               ena_dump_stats_to_dmesg(adapter);
+               /* rtnl lock already obtained in dev_ioctl() layer */
+               ena_destroy_device(adapter);
+               ena_restore_device(adapter);
+       }
+
        return 0;
 }
 
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
 
        ena_com_set_admin_running_state(ena_dev, false);
 
-       ena_close(netdev);
+       if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+               ena_down(adapter);
 
        /* Before releasing the ENA resources, a device reset is required.
         * (to prevent the device from accessing them).
-        * In case the reset flag is set and the device is up, ena_close
+        * In case the reset flag is set and the device is up, ena_down()
         * already performs the reset, so it can be skipped.
         */
        if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
index 5ee18660bc33a2572320ac3210029e883841b044..c9617675f934b615b05f13e4e74c8d9f7aeeaabb 100644 (file)
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
-       if (vf_id >= bp->pf.max_vfs) {
+       if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
index 3d201d7324bdc7b2c50377e5da5b3ab3acb8a423..d8fee26cd45eaf9691a323c5f5d6bfd8379eb3b8 100644 (file)
@@ -421,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
        }
 
        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
-       if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
+       if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
index 6f9fa6e3c42a0cc1988e82ae976cce17d45ca0e5..d8424ed16c33777e81c78bab929bda4b5c5aeb29 100644 (file)
@@ -344,7 +344,6 @@ struct adapter_params {
 
        unsigned int sf_size;             /* serial flash size in bytes */
        unsigned int sf_nsec;             /* # of flash sectors */
-       unsigned int sf_fw_start;         /* start of FW image in flash */
 
        unsigned int fw_vers;             /* firmware version */
        unsigned int bs_vers;             /* bootstrap version */
index f63210f155796c9809415d7da54f59086ebdbf7c..375ef86a84da1b08c0a3ff1dd2a6a5380600acb4 100644 (file)
@@ -2844,8 +2844,6 @@ enum {
        SF_RD_DATA_FAST = 0xb,        /* read flash */
        SF_RD_ID        = 0x9f,       /* read ID */
        SF_ERASE_SECTOR = 0xd8,       /* erase sector */
-
-       FW_MAX_SIZE = 16 * SF_SEC_SIZE,
 };
 
 /**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
        const __be32 *p = (const __be32 *)fw_data;
        const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-       unsigned int fw_img_start = adap->params.sf_fw_start;
-       unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+       unsigned int fw_start_sec = FLASH_FW_START_SEC;
+       unsigned int fw_size = FLASH_FW_MAX_SIZE;
+       unsigned int fw_start = FLASH_FW_START;
 
        if (!size) {
                dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
                        "FW image size differs from size in FW header\n");
                return -EINVAL;
        }
-       if (size > FW_MAX_SIZE) {
+       if (size > fw_size) {
                dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
-                       FW_MAX_SIZE);
+                       fw_size);
                return -EFBIG;
        }
        if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
-       ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+       ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;
 
-       addr = fw_img_start;
+       addr = fw_start;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
        }
 
        ret = t4_write_flash(adap,
-                            fw_img_start + offsetof(struct fw_hdr, fw_ver),
+                            fw_start + offsetof(struct fw_hdr, fw_ver),
                             sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
 out:
        if (ret)
index 8184d2fca9be017b4d374e1ba5b3fab6e7d77e9e..a74300a4459c78d7797cff90962a1a7a394dccb5 100644 (file)
@@ -3469,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
                        goto failed_regulator;
                }
        } else {
+               if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto failed_regulator;
+               }
                fep->reg_phy = NULL;
        }
 
@@ -3552,8 +3556,9 @@ failed_clk_ipg:
 failed_clk:
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
-failed_phy:
        of_node_put(phy_node);
+failed_phy:
+       dev_id--;
 failed_ioremap:
        free_netdev(ndev);
 
index 544114281ea754fc9c2221b8aa2f2b8d3ef842f6..9f8d4f8e57e30fa09ca1e8af3aa0d6a017633c1d 100644 (file)
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
        now = tmr_cnt_read(etsects);
        now += delta;
        tmr_cnt_write(etsects, now);
+       set_fipers(etsects);
 
        spin_unlock_irqrestore(&etsects->lock, flags);
 
-       set_fipers(etsects);
-
        return 0;
 }
 
index d7bdea79e9fa755f8a9c6ad63b0ed1eb1e3c63f5..8fd2458060a088d6475622e4ae68cd40df23c116 100644 (file)
@@ -331,7 +331,8 @@ struct e1000_adapter {
 enum e1000_state_t {
        __E1000_TESTING,
        __E1000_RESETTING,
-       __E1000_DOWN
+       __E1000_DOWN,
+       __E1000_DISABLED
 };
 
 #undef pr_fmt
index 1982f7917a8d5d68776b2e052ac1df809f11c065..3dd4aeb2706d393cd8fbf4998a7582d33b9bafcd 100644 (file)
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct net_device *netdev;
-       struct e1000_adapter *adapter;
+       struct e1000_adapter *adapter = NULL;
        struct e1000_hw *hw;
 
        static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        u16 tmp = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
        int bars, need_ioport;
+       bool disable_dev = false;
 
        /* do not allocate ioport bars when not needed */
        need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
        iounmap(hw->ce4100_gbe_mdio_base_virt);
        iounmap(hw->hw_addr);
 err_ioremap:
+       disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_selected_regions(pdev, bars);
 err_pci_reg:
-       pci_disable_device(pdev);
+       if (!adapter || disable_dev)
+               pci_disable_device(pdev);
        return err;
 }
 
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       bool disable_dev;
 
        e1000_down_and_stop(adapter);
        e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev, adapter->bars);
 
+       disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
        free_netdev(netdev);
 
-       pci_disable_device(pdev);
+       if (disable_dev)
+               pci_disable_device(pdev);
 }
 
 /**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
        if (netif_running(netdev))
                e1000_free_irq(adapter);
 
-       pci_disable_device(pdev);
+       if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+               pci_disable_device(pdev);
 
        return 0;
 }
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
                pr_err("Cannot enable PCI device from suspend\n");
                return err;
        }
+
+       /* flush memory to make sure state is correct */
+       smp_mb__before_atomic();
+       clear_bit(__E1000_DISABLED, &adapter->flags);
        pci_set_master(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 
        if (netif_running(netdev))
                e1000_down(adapter);
-       pci_disable_device(pdev);
+
+       if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+               pci_disable_device(pdev);
 
        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
                pr_err("Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
+
+       /* flush memory to make sure state is correct */
+       smp_mb__before_atomic();
+       clear_bit(__E1000_DISABLED, &adapter->flags);
        pci_set_master(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
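
The e1000 hunks above funnel every pci_disable_device() call through a new __E1000_DISABLED flag: test_and_set_bit() lets exactly one of the competing paths (shutdown, remove, error-detected, failed probe) perform the disable, and the resume paths clear the bit to re-arm it. A minimal user-space sketch of the once-only gate using C11 atomics; atomic_exchange() models test_and_set_bit() and atomic_store() models clear_bit().

/* Sketch: gate a one-shot teardown call with an atomic test-and-set, as
 * the __E1000_DISABLED flag does for pci_disable_device().
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool disabled;
static int disable_calls;

static void pci_disable_device_once(void)
{
        /* Only the caller that flips false -> true does the real work. */
        if (!atomic_exchange(&disabled, true))
                disable_calls++;
}

static void resume(void)
{
        atomic_store(&disabled, false);  /* re-arm after re-enabling */
}

int main(void)
{
        pci_disable_device_once();      /* e.g. the shutdown path */
        pci_disable_device_once();      /* e.g. the remove path: skipped */
        printf("disable ran %d time(s)\n", disable_calls);     /* 1 */

        resume();
        pci_disable_device_once();      /* armed again after resume */
        printf("disable ran %d time(s)\n", disable_calls);     /* 2 */
        return 0;
}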
index d6d4ed7acf03117232778a82a757ee44d65f765a..31277d3bb7dc1241032695d2d9424779654f4f5f 100644 (file)
@@ -1367,6 +1367,9 @@ out:
  *  Checks to see if the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
+ *
+ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ *  up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status)
-               return 0;
+               return 1;
 
        /* First we want to see if the MII Status Register reports
         * link.  If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
         * different link partner.
         */
        ret_val = e1000e_config_fc_after_link_up(hw);
-       if (ret_val)
+       if (ret_val) {
                e_dbg("Error configuring flow control\n");
+               return ret_val;
+       }
 
-       return ret_val;
+       return 1;
 }
 
 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
index 321d8be80871ce2fc4a5ab39c3189dad2370c988..42dcaefc4c1942777edf6044953ccd1f798cac2a 100644 (file)
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
        else
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
+       /* Copy the address first, so that we avoid a possible race with
+        * .set_rx_mode(). If we copy after changing the address in the filter
+        * list, we might open ourselves to a narrow race window where
+        * .set_rx_mode could delete our dev_addr filter and prevent traffic
+        * from passing.
+        */
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_del_mac_filter(vsi, netdev->dev_addr);
        i40e_add_mac_filter(vsi, addr->sa_data);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
 
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 
+       /* Under some circumstances, we might receive a request to delete
+        * our own device address from our uc list. Because we store the
+        * device address in the VSI's MAC/VLAN filter list, we need to ignore
+        * such requests and not delete our device address from this list.
+        */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        i40e_del_mac_filter(vsi, addr);
 
        return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
        /* Set Bit 7 to be valid */
        mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
 
-       /* Set L4type to both TCP and UDP support */
-       mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+       /* Set L4type for TCP support */
+       mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
 
        /* Set cloud filter mode */
        mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
             is_valid_ether_addr(filter->src_mac)) ||
            (is_multicast_ether_addr(filter->dst_mac) &&
             is_multicast_ether_addr(filter->src_mac)))
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
-       /* Make sure port is specified, otherwise bail out, for channel
-        * specific cloud filter needs 'L4 port' to be non-zero
+       /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+        * ports are not yet supported via the big buffer.
         */
-       if (!filter->dst_port)
-               return -EINVAL;
+       if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+               return -EOPNOTSUPP;
 
        /* adding filter using src_port/src_ip is not supported at this stage */
        if (filter->src_port || filter->src_ipv4 ||
            !ipv6_addr_any(&filter->ip.v6.src_ip6))
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        /* copy element needed to add cloud filter from filter */
        i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
            is_multicast_ether_addr(filter->src_mac)) {
                /* MAC + IP : unsupported mode */
                if (filter->dst_ipv4)
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
 
                /* since we validated that L4 port must be valid before
                 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
 
        if (tc < 0) {
                dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 
        if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
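
The first two i40e hunks above work as a pair: addr_unsync() now refuses to drop the filter matching netdev->dev_addr, and i40e_set_mac() publishes the new address into dev_addr before editing the filter list, so a racing .set_rx_mode() can never delete the filter for the address the netdev is actually using. Below is a hedged single-file sketch of that ordering argument; the one-slot "filter list" and the helpers are illustrative, not the driver's data structures.

/* Sketch: publish dev_addr first, then edit the filters, and have the
 * unsync path ignore requests that match the current dev_addr.
 */
#include <stdio.h>
#include <string.h>

static char dev_addr[18];
static char filter[18];                 /* stand-in for the MAC filter hash */

static void addr_unsync(const char *addr)
{
        if (!strcmp(addr, dev_addr))    /* the guard added by the fix */
                return;
        if (!strcmp(filter, addr))
                filter[0] = '\0';
}

static void set_mac(const char *new_addr)
{
        strcpy(dev_addr, new_addr);     /* 1: publish first ... */
        strcpy(filter, new_addr);       /* 2: ... then update the filters */
}

int main(void)
{
        set_mac("aa:bb:cc:dd:ee:02");
        addr_unsync("aa:bb:cc:dd:ee:02");   /* racing unsync: ignored */
        printf("filter for dev_addr kept: %s\n", filter[0] ? "yes" : "no");
        return 0;
}

With the old ordering (filters first, dev_addr last) the guard would compare against the stale address and let the new filter be deleted, which is the narrow window the comment in the hunk describes.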
index 4566d66ffc7c95a72c4135075a19d5ac8bcd2689..5bc2748ac46860df3a14553d870197d0d67ddda4 100644 (file)
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
-       stale = &skb_shinfo(skb)->frags[0];
-       for (;;) {
+       for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+               int stale_size = skb_frag_size(stale);
+
                sum += skb_frag_size(frag++);
 
+               /* The stale fragment may present us with a smaller
+                * descriptor than the actual fragment size. To account
+                * for that we need to remove all the data on the front and
+                * figure out what the remainder would be in the last
+                * descriptor associated with the fragment.
+                */
+               if (stale_size > I40E_MAX_DATA_PER_TXD) {
+                       int align_pad = -(stale->page_offset) &
+                                       (I40E_MAX_READ_REQ_SIZE - 1);
+
+                       sum -= align_pad;
+                       stale_size -= align_pad;
+
+                       do {
+                               sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                               stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                       } while (stale_size > I40E_MAX_DATA_PER_TXD);
+               }
+
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
                        return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
                if (!nr_frags--)
                        break;
 
-               sum -= skb_frag_size(stale++);
+               sum -= stale_size;
        }
 
        return false;
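
The descriptor accounting added above leans on a standard power-of-two trick: for a boundary P, -(offset) & (P - 1) is the number of bytes from offset up to the next multiple of P, and that padding is peeled off before the rest of the fragment is sliced into maximum-size descriptors. A tiny worked example with an assumed boundary value; the real I40E_MAX_READ_REQ_SIZE and I40E_MAX_DATA_PER_TXD definitions live in the driver headers.

/* Worked example of the align_pad computation in __i40e_chk_linearize().
 * MAX_READ_REQ_SIZE is assumed to be 4096 here for illustration.
 */
#include <stdio.h>

#define MAX_READ_REQ_SIZE 4096U         /* must be a power of two */

int main(void)
{
        unsigned int page_offset = 100;

        /* -(x) & (P - 1) == distance from x to the next multiple of P */
        unsigned int align_pad = -page_offset & (MAX_READ_REQ_SIZE - 1);

        printf("offset %u -> pad %u (next boundary at %u)\n",
               page_offset, align_pad, page_offset + align_pad);
        /* prints: offset 100 -> pad 3996 (next boundary at 4096) */
        return 0;
}

The identical hunk in the i40evf driver below applies the same correction to the VF datapath.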
index 50864f99446d3a9f22c83c984eaec4f25caa6015..1ba29bb85b6706c15b5d80d760a7128afd31e0fe 100644 (file)
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
-       stale = &skb_shinfo(skb)->frags[0];
-       for (;;) {
+       for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+               int stale_size = skb_frag_size(stale);
+
                sum += skb_frag_size(frag++);
 
+               /* The stale fragment may present us with a smaller
+                * descriptor than the actual fragment size. To account
+                * for that we need to remove all the data on the front and
+                * figure out what the remainder would be in the last
+                * descriptor associated with the fragment.
+                */
+               if (stale_size > I40E_MAX_DATA_PER_TXD) {
+                       int align_pad = -(stale->page_offset) &
+                                       (I40E_MAX_READ_REQ_SIZE - 1);
+
+                       sum -= align_pad;
+                       stale_size -= align_pad;
+
+                       do {
+                               sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                               stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+                       } while (stale_size > I40E_MAX_DATA_PER_TXD);
+               }
+
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
                        return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
                if (!nr_frags--)
                        break;
 
-               sum -= skb_frag_size(stale++);
+               sum -= stale_size;
        }
 
        return false;
index 23f7d828cf676244766e582674a2946ea467bb6b..6ef20e5cc77dd314a60c9b1330fb421f81ceda7a 100644 (file)
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
                return 0;
        }
 
-       wmb(); /* reset needs to be written before we read control register */
+       /* The reset needs to be written before we read the control register,
+        * and we must wait for the HW to become responsive once again.
+        */
+       wmb();
+       msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
        end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
        do {
                u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
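
The mlxsw hunk above inserts a fixed settle delay between posting the software reset and the first read of the FW-ready register: the barrier only orders the write ahead of the reads, it does not give the hardware time to become responsive. Below is a hedged user-space sketch of the resulting "write reset, wait, then poll until magic or timeout" shape; the register accessors are stubs, and only the 0x5E magic value comes from the header hunk that follows.

/* Sketch: settle delay after reset, then bounded polling for readiness. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define FW_READY_MAGIC   0x5E
#define RESET_WAIT_MS    100
#define RESET_TIMEOUT_MS 5000

static int ready_reads;
static void write_reset(void) { ready_reads = 0; }
static unsigned int read_fw_ready(void)
{
        /* Pretend the device reports ready on the third read. */
        return ++ready_reads >= 3 ? FW_READY_MAGIC : 0;
}

static long now_ms(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

int main(void)
{
        write_reset();
        /* In the kernel this is wmb() + msleep(): order the reset write
         * ahead of the first read, then let the HW come back to life.
         */
        usleep(RESET_WAIT_MS * 1000);

        long deadline = now_ms() + RESET_TIMEOUT_MS;
        bool ready = false;
        do {
                if (read_fw_ready() == FW_READY_MAGIC) {
                        ready = true;
                        break;
                }
                usleep(10 * 1000);      /* back off between polls */
        } while (now_ms() < deadline);

        printf("device %s\n", ready ? "ready" : "timed out");
        return ready ? 0 : 1;
}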
index a6441208e9d96e0b96de0cde155f50fa437f56ae..fb082ad21b00e43435003e186eae92ff3d197a66 100644 (file)
@@ -59,6 +59,7 @@
 #define MLXSW_PCI_SW_RESET                     0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
 #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS          100
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
 #define MLXSW_PCI_FW_READY_MAGIC               0x5E
index 9bd8d28de1522906b92021a8cf2c476b79c35a56..c3837ca7a705bd4d6ed48c1877d0f0f0de984c85 100644 (file)
@@ -4376,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                }
                if (!info->linking)
                        break;
-               if (netdev_has_any_upper_dev(upper_dev)) {
+               if (netdev_has_any_upper_dev(upper_dev) &&
+                   (!netif_is_bridge_master(upper_dev) ||
+                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+                                                         upper_dev))) {
                        NL_SET_ERR_MSG(extack,
                                       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
@@ -4504,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                              u16 vid)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
@@ -4520,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                }
                if (!info->linking)
                        break;
-               if (netdev_has_any_upper_dev(upper_dev)) {
+               if (netdev_has_any_upper_dev(upper_dev) &&
+                   (!netif_is_bridge_master(upper_dev) ||
+                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+                                                         upper_dev))) {
                        NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
index 432ab9b12b7f59ded586a663f0a686741740463d..05ce1befd9b378b41584c149abd6a0d53c4be246 100644 (file)
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                struct net_device *brport_dev,
                                struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+                                        const struct net_device *br_dev);
 
 /* spectrum.c */
 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
index c33beac5def0615ca24a7ade5f8fdd6b2651647c..b5397da94d7f5b178da560145ee8def18825df6e 100644 (file)
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                  int tclass_num, u32 min, u32 max,
                                  u32 probability, bool is_ecn)
 {
-       char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+       char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+       char cwtp_cmd[MLXSW_REG_CWTP_LEN];
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;
 
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
        if (err)
                return err;
 
-       mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+       mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
 
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
 }
 
 static int
index be657b8533f04922a61a2f3a4b1aeddf3137cdf5..434b3922b34f06689e3e85c0dc70810902c51275 100644 (file)
@@ -3228,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
 {
        if (!removing)
                nh->should_offload = 1;
-       else if (nh->offloaded)
+       else
                nh->should_offload = 0;
        nh->update = 1;
 }
index 7b8548e25ae73da7e835c7a8da1eaaece8cbbcfb..593ad31be7490d6af8abf3f5c3929de925c0ca77 100644 (file)
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
        return NULL;
 }
 
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+                                        const struct net_device *br_dev)
+{
+       return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
 static struct mlxsw_sp_bridge_device *
 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
                              struct net_device *br_dev)
index 1a603fdd9e802d1344e37bcfa719404b69312e43..99b0487b6d820b53befa8a5904a17d5a9176081a 100644 (file)
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
                return err;
        }
        nn_writeb(nn, ctrl_offset, entry->entry);
+       nfp_net_irq_unmask(nn, entry->entry);
 
        return 0;
 }
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
                                 unsigned int vector_idx)
 {
        nn_writeb(nn, ctrl_offset, 0xff);
+       nn_pci_flush(nn);
        free_irq(nn->irq_entries[vector_idx].vector, nn);
 }
 
index 75323000c3646bc781c12287367f8b455ada5a6a..b9e2846589f8678e72683c230d601d7f517de5da 100644 (file)
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
-       [TXALCR1]       = 0x00a0,
+       [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
-       [TXALCR1]       = 0x00a0,
+       [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
@@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        /* ioremap the TSU registers */
        if (mdp->cd->tsu) {
                struct resource *rtsu;
+
                rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
-               if (IS_ERR(mdp->tsu_addr)) {
-                       ret = PTR_ERR(mdp->tsu_addr);
+               if (!rtsu) {
+                       dev_err(&pdev->dev, "no TSU resource\n");
+                       ret = -ENODEV;
+                       goto out_release;
+               }
+               /* We can only request the TSU region for the first port
+                * of the two sharing this TSU for the probe to succeed...
+                */
+               if (devno % 2 == 0 &&
+                   !devm_request_mem_region(&pdev->dev, rtsu->start,
+                                            resource_size(rtsu),
+                                            dev_name(&pdev->dev))) {
+                       dev_err(&pdev->dev, "can't request TSU resource.\n");
+                       ret = -EBUSY;
+                       goto out_release;
+               }
+               mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+                                            resource_size(rtsu));
+               if (!mdp->tsu_addr) {
+                       dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+                       ret = -ENOMEM;
                        goto out_release;
                }
                mdp->port = devno % 2;
                ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
        }
 
-       /* initialize first or needed device */
-       if (!devno || pd->needs_init) {
+       /* Need to init only the first port of the two sharing a TSU */
+       if (devno % 2 == 0) {
                if (mdp->cd->chip_reset)
                        mdp->cd->chip_reset(ndev);
 
index 337d53d12e94b3acfe745e48422b44d1939ad2c0..c0af0bc4e714891080e966c7d6ddd5e47e8ab5a5 100644 (file)
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
        struct net_device *ndev = priv->dev;
+       int interface = priv->plat->interface;
        unsigned long flags;
        bool ret = false;
 
+       if ((interface != PHY_INTERFACE_MODE_MII) &&
+           (interface != PHY_INTERFACE_MODE_GMII) &&
+           !phy_interface_mode_is_rgmii(interface))
+               goto out;
+
        /* Using PCS we cannot deal with the PHY registers at this stage,
         * so we do not support extra features like EEE.
         */
index b718a02a6bb6055d7841619b42cb400d1eea0984..0a48b3073d3d3614483e9d8ae64f073a5b3d2ead 100644 (file)
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
+       if (skb_dst(skb)) {
+               int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
+                         GENEVE_BASE_HLEN - info->options_len - 14;
+
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       }
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
                tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(dst))
                return PTR_ERR(dst);
 
+       if (skb_dst(skb)) {
+               int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
+                         GENEVE_BASE_HLEN - info->options_len - 14;
+
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       }
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
                prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
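For reference, the bare 14 in both MTU computations is the inner Ethernet header length (ETH_HLEN); a worked example with illustrative values:

/* Underlay dst_mtu = 1500, IPv4, no geneve options (options_len = 0):
 *   mtu = 1500
 *         - sizeof(struct iphdr)    // 20
 *         - GENEVE_BASE_HLEN        // 8 (UDP) + 8 (geneve) = 16
 *         - 0 - 14                  // inner Ethernet header
 *       = 1450, the usual geneve default MTU
 */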
index a178c5efd33e5486d47608dd7e4d7218b8eda4a1..a0f2be81d52e4a4fd24585d8957b181a3cf9a103 100644 (file)
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        return 0;
 
 unregister_netdev:
+       /* macvlan_uninit would free the macvlan port */
        unregister_netdevice(dev);
+       return err;
 destroy_macvlan_port:
-       if (create)
+       /* The macvlan port may be freed by macvlan_uninit when registration
+        * fails, so only destroy the port while it is still valid.
+        */
+       if (create && macvlan_port_get_rtnl(dev))
                macvlan_port_destroy(port->dev);
        return err;
 }
index 135296508a7ed70661f7e8f9aa9c0eb7fadf3133..6425ce04d3f95d6ca7a7b2ec7a22b7f860358a1e 100644 (file)
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
 
        data->regulator = devm_regulator_get(&pdev->dev, "phy");
        if (IS_ERR(data->regulator)) {
-               if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
+               if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto err_out_free_mdiobus;
+               }
 
                dev_info(&pdev->dev, "no regulator found\n");
                data->regulator = NULL;
index 827f3f92560e711a17514055cb96adfbc05708f7..249ce5cbea2201902563ceb6b9339809e8ccb99e 100644 (file)
@@ -1296,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
                switch (cmd) {
                case SIOCGMIIPHY:
                        mii->phy_id = pl->phydev->mdio.addr;
+                       /* fall through */
 
                case SIOCGMIIREG:
                        ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1318,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
                switch (cmd) {
                case SIOCGMIIPHY:
                        mii->phy_id = 0;
+                       /* fall through */
 
                case SIOCGMIIREG:
                        ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1429,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
        WARN_ON(!lockdep_rtnl_is_held());
 
        set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
+       queue_work(system_power_efficient_wq, &pl->resolve);
        flush_work(&pl->resolve);
-
-       netif_carrier_off(pl->netdev);
 }
 
 static void phylink_sfp_link_up(void *upstream)
index 8a1b1f4c1b7c675278902ec78c31a632df44351f..ab64a142b832c20328f73c691fb566b78c2c8399 100644 (file)
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
 void sfp_unregister_upstream(struct sfp_bus *bus)
 {
        rtnl_lock();
-       sfp_unregister_bus(bus);
+       if (bus->sfp)
+               sfp_unregister_bus(bus);
        bus->upstream = NULL;
        bus->netdev = NULL;
        rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
 void sfp_unregister_socket(struct sfp_bus *bus)
 {
        rtnl_lock();
-       sfp_unregister_bus(bus);
+       if (bus->netdev)
+               sfp_unregister_bus(bus);
        bus->sfp_dev = NULL;
        bus->sfp = NULL;
        bus->socket_ops = NULL;
index 3000ddd1c7e2e481bb961deb86099b5c2ea11371..728819feab44db75f6b4c369767c3ed6c1966cbf 100644 (file)
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
        {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
index f7d228b5ba933f744474be119802f41e461c5715..987f1252a3cf888d76432ee4f55b1bd215358209 100644 (file)
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
+       if (changed & IEEE80211_CONF_CHANGE_PS) {
+               list_for_each_entry(tmp, &wcn->vif_list, list) {
+                       vif = wcn36xx_priv_to_vif(tmp);
+                       if (hw->conf.flags & IEEE80211_CONF_PS) {
+                               if (vif->bss_conf.ps) /* PS allowed? */
+                                       wcn36xx_pmc_enter_bmps_state(wcn, vif);
+                       } else {
+                               wcn36xx_pmc_exit_bmps_state(wcn, vif);
+                       }
+               }
+       }
+
        mutex_unlock(&wcn->conf_mutex);
 
        return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                vif_priv->dtim_period = bss_conf->dtim_period;
        }
 
-       if (changed & BSS_CHANGED_PS) {
-               wcn36xx_dbg(WCN36XX_DBG_MAC,
-                           "mac bss PS set %d\n",
-                           bss_conf->ps);
-               if (bss_conf->ps) {
-                       wcn36xx_pmc_enter_bmps_state(wcn, vif);
-               } else {
-                       wcn36xx_pmc_exit_bmps_state(wcn, vif);
-               }
-       }
-
        if (changed & BSS_CHANGED_BSSID) {
                wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
                            bss_conf->bssid);
index 589fe5f7097160a4bcee5d1b1be979ce389e297c..1976b80c235fe5e4c5c591a761c1dc8d1d7b941b 100644 (file)
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
        struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
        if (WCN36XX_BMPS != vif_priv->pw_state) {
-               wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
-               return -EINVAL;
+               /* Unbalanced call or last BMPS enter failed */
+               wcn36xx_dbg(WCN36XX_DBG_PMC,
+                           "Not in BMPS mode, no need to exit\n");
+               return -EALREADY;
        }
        wcn36xx_smd_exit_bmps(wcn, vif);
        vif_priv->pw_state = WCN36XX_FULL_POWER;
index d749abeca3ae99866c33ffce095d09cfce8c8ff0..403e65c309d0ac8e22fa84f163c636b18bc5872c 100644 (file)
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
        return index & (q->n_window - 1);
 }
 
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
                                     struct iwl_txq *txq, int idx)
 {
-       return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq,
-                                                                        idx);
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (trans->cfg->use_tfh)
+               idx = iwl_pcie_get_cmd_index(txq, idx);
+
+       return txq->tfds + trans_pcie->tfd_size * idx;
 }
 
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
index 16b345f54ff0003db66f70f547338558bc7c4703..6d0a907d5ba58f8666d06e65744d1c179edf3013 100644 (file)
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
 
 static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
         */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
        lockdep_assert_held(&txq->lock);
 
        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
-                               iwl_pcie_get_tfd(trans_pcie, txq, idx));
+                               iwl_pcie_get_tfd(trans, txq, idx));
 
        /* free SKB */
        if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-       struct iwl_tfh_tfd *tfd =
-               iwl_pcie_get_tfd(trans_pcie, txq, idx);
+       struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
        bool amsdu;
        int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
        u8 group_id = iwl_cmd_groupid(cmd->id);
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
-       struct iwl_tfh_tfd *tfd =
-               iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+       struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 
        memset(tfd, 0, sizeof(*tfd));
 
index fed6d842a5e1dc444fe58ad203cb8fb5e3ab1c6f..3f85713c41dcc9291130f25873ef97bd702335f1 100644 (file)
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i, num_tbs;
-       void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
+       void *tfd = iwl_pcie_get_tfd(trans, txq, index);
 
        /* Sanity check on number of chunks */
        num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
        }
 
        trace_iwlwifi_dev_tx(trans->dev, skb,
-                            iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+                            iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
                             trans_pcie->tfd_size,
                             &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
                             hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                IEEE80211_CCMP_HDR_LEN : 0;
 
        trace_iwlwifi_dev_tx(trans->dev, skb,
-                            iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+                            iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
                             trans_pcie->tfd_size,
                             &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
 
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
               IWL_FIRST_TB_SIZE);
 
-       tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+       tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
                                         iwl_pcie_tfd_get_num_tbs(trans, tfd));
index c5a34671abdaf78a1257b869af817ecd148a6e0c..9bd7ddeeb6a5c7b29569e51ad2f469d9fe493a76 100644 (file)
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 
        netif_carrier_off(netdev);
 
+       xenbus_switch_state(dev, XenbusStateInitialising);
        return netdev;
 
  exit:
index 1e46e60b8f1080e339ebe81c1710dabb23afef75..839650e0926af1aaaf21bafbdc1baa79cf907d76 100644 (file)
@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
                struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+       unsigned short bs = 1 << ns->lba_shift;
        unsigned stream_alignment = 0;
 
        if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
        blk_mq_freeze_queue(disk->queue);
        blk_integrity_unregister(disk);
 
-       blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+       blk_queue_logical_block_size(disk->queue, bs);
+       blk_queue_physical_block_size(disk->queue, bs);
+       blk_queue_io_min(disk->queue, bs);
+
        if (ns->ms && !ns->ext &&
            (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
                nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -2987,6 +2991,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        mutex_unlock(&ns->ctrl->namespaces_mutex);
 
        synchronize_srcu(&ns->head->srcu);
+       nvme_mpath_check_last_path(ns);
        nvme_put_ns(ns);
 }
 
index ea1aa5283e8ed9215537594a33a243d47b363e25..a00eabd0642738bbdbaf0b11d7f8e1747c996e3c 100644 (file)
@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
                rcu_assign_pointer(head->current_path, NULL);
 }
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+       struct nvme_ns_head *head = ns->head;
+
+       if (head->disk && list_empty(&head->list))
+               kblockd_schedule_work(&head->requeue_work);
+}
+
 #else
 static inline void nvme_failover_req(struct request *req)
 {
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
index f5800c3c9082a6f038c129327bccd22f5eb861fe..d53550e612bc1350febf84c891e2a28c24a6ae34 100644 (file)
@@ -448,12 +448,31 @@ static void **nvme_pci_iod_list(struct request *req)
        return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       unsigned int avg_seg_size;
+
+       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
+                       blk_rq_nr_phys_segments(req));
+
+       if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+               return false;
+       if (!iod->nvmeq->qid)
+               return false;
+       if (!sgl_threshold || avg_seg_size < sgl_threshold)
+               return false;
+       return true;
+}
+
 static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
        unsigned int size = blk_rq_payload_bytes(rq);
 
+       iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
                                iod->use_sgl);
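A rough feel for the heuristic, assuming the driver's default sgl_threshold of 32 KiB:

/* avg_seg_size = DIV_ROUND_UP(payload_bytes, nr_phys_segments)
 *
 *   64 KiB in 8 segments  -> avg 8192  <  32768 -> PRPs
 *   512 KiB in 8 segments -> avg 65536 >= 32768 -> SGLs, provided the
 *   controller advertises SGL support and this is an I/O queue (qid != 0)
 */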
@@ -604,8 +623,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
        dma_addr_t prp_dma;
        int nprps, i;
 
-       iod->use_sgl = false;
-
        length -= (page_size - offset);
        if (length <= 0) {
                iod->first_dma = 0;
@@ -715,8 +732,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
        int entries = iod->nents, i = 0;
        dma_addr_t sgl_dma;
 
-       iod->use_sgl = true;
-
        /* setting the transfer type as SGL */
        cmd->flags = NVME_CMD_SGL_METABUF;
 
@@ -770,23 +785,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
        return BLK_STS_OK;
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
-       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       unsigned int avg_seg_size;
-
-       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
-                       blk_rq_nr_phys_segments(req));
-
-       if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
-               return false;
-       if (!iod->nvmeq->qid)
-               return false;
-       if (!sgl_threshold || avg_seg_size < sgl_threshold)
-               return false;
-       return true;
-}
-
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                struct nvme_command *cmnd)
 {
@@ -806,7 +804,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                                DMA_ATTR_NO_WARN))
                goto out;
 
-       if (nvme_pci_use_sgls(dev, req))
+       if (iod->use_sgl)
                ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
        else
                ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
index 37af56596be6ce8a0339ce2a3151f8dd98f8f854..2a0bba7f50cf43bb76e9d1f3073e24ba1edd9e1c 100644 (file)
@@ -974,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_start_queues(&ctrl->ctrl);
 
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+               /* state change failure should never happen */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 {
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                return;
 
        queue_work(nvme_wq, &ctrl->err_work);
@@ -1753,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_rdma_shutdown_ctrl(ctrl, false);
 
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+               /* state change failure should never happen */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        ret = nvme_rdma_configure_admin_queue(ctrl, false);
        if (ret)
                goto out_fail;
index 7b75d9de55ab0d33939bf314a81ee01e26a6d1b1..6a018a0bd6ce851306dd82e5c21e680c626f99d5 100644 (file)
@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
 {
        struct fcloop_nport *nport = NULL, *tmpport;
-       struct fcloop_tport *tport;
+       struct fcloop_tport *tport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;
index 3481e69738b5f94d18cc393f77934fac7cfc7fdc..a327be1d264b8cea389feeb91423bb24a6a0d688 100644 (file)
@@ -231,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                        rc = of_mdiobus_register_phy(mdio, child, addr);
                else
                        rc = of_mdiobus_register_device(mdio, child, addr);
-               if (rc)
+
+               if (rc == -ENODEV)
+                       dev_err(&mdio->dev,
+                               "MDIO device at address %d is missing.\n",
+                               addr);
+               else if (rc)
                        goto unregister;
        }
 
@@ -255,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 
                        if (of_mdiobus_child_is_phy(child)) {
                                rc = of_mdiobus_register_phy(mdio, child, addr);
-                               if (rc)
+                               if (rc && rc != -ENODEV)
                                        goto unregister;
                        }
                }
index 791449a2370f4f10af698dbaf27bb3749d6d263a..daa68acbc9003b34615fc43b810c5fb77dd87ecf 100644 (file)
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
        class_unregister(&wmi_bus_class);
 }
 
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
 module_exit(acpi_wmi_exit);
index 57efbd3b053b37ca43816483dd3364c3c48a761e..bd56653b9bbc2bed8e27c4909b2e8c025c4f1627 100644 (file)
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
                }
                range = 0;
                while (range < pages) {
-                       if (map->unmap_ops[offset+range].handle == -1) {
-                               range--;
+                       if (map->unmap_ops[offset+range].handle == -1)
                                break;
-                       }
                        range++;
                }
                err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
 out_unlock_put:
        mutex_unlock(&priv->lock);
 out_put_map:
-       if (use_ptemod)
+       if (use_ptemod) {
                map->vma = NULL;
+               unmap_grant_pages(map, 0, map->count);
+       }
        gntdev_put_map(priv, map);
        return err;
 }
index b63a592ad29d55c3714320593a92f0da8b563ac5..0b25cf87b6d608b6a1646e89e1408270f148a783 100644 (file)
@@ -43,7 +43,14 @@ struct bpf_map_ops {
 };
 
 struct bpf_map {
-       atomic_t refcnt;
+       /* 1st cacheline with read-mostly members of which some
+        * are also accessed in fast-path (e.g. ops, max_entries).
+        */
+       const struct bpf_map_ops *ops ____cacheline_aligned;
+       struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+       void *security;
+#endif
        enum bpf_map_type map_type;
        u32 key_size;
        u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
        u32 pages;
        u32 id;
        int numa_node;
-       struct user_struct *user;
-       const struct bpf_map_ops *ops;
-       struct work_struct work;
+       bool unpriv_array;
+       /* 7 bytes hole */
+
+       /* 2nd cacheline with misc members to avoid false sharing
+        * particularly with refcounting.
+        */
+       struct user_struct *user ____cacheline_aligned;
+       atomic_t refcnt;
        atomic_t usercnt;
-       struct bpf_map *inner_map_meta;
+       struct work_struct work;
        char name[BPF_OBJ_NAME_LEN];
-#ifdef CONFIG_SECURITY
-       void *security;
-#endif
 };
 
 /* function argument constraints */
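One way to verify the intended split on a given build (the invocation is illustrative):

/* $ pahole -C bpf_map vmlinux
 *
 * Expected: ops through unpriv_array packed into the first 64-byte
 * cacheline (read-mostly, fast-path), and user/refcnt/usercnt opening a
 * second cacheline, so refcount traffic does not evict the fast-path
 * fields.
 */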
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
 struct bpf_array {
        struct bpf_map map;
        u32 elem_size;
+       u32 index_mask;
        /* 'ownership' of prog_array is claimed by the first program that
         * is going to use this map or by the first program which FD is stored
         * in the map to make sure that all callers and callees have the same
index ff3642d267f7f3311a063e2b1e2b2ebd946686e8..94081e9a5010608d81f4a30badef921b69ee5ecc 100644 (file)
@@ -17,7 +17,6 @@ struct sh_eth_plat_data {
        unsigned char mac_addr[ETH_ALEN];
        unsigned no_ether_link:1;
        unsigned ether_link_active_low:1;
-       unsigned needs_init:1;
 };
 
 #endif
index 2f8f93da5dc2660f4db37c04f8a434809b3120a1..9a5ccf03a59b1e0a4820e2a978c66f1df8a9a82f 100644 (file)
@@ -966,7 +966,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
 void sctp_transport_burst_reset(struct sctp_transport *);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
 void sctp_transport_reset(struct sctp_transport *t);
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
index 13223396dc64abdbcd504b2439214efeb5c6def8..f96391e84a8a3161df98a964692e7ce445050944 100644 (file)
@@ -146,7 +146,7 @@ struct vxlanhdr_gpe {
                np_applied:1,
                instance_applied:1,
                version:2,
-reserved_flags2:2;
+               reserved_flags2:2;
 #elif defined(__BIG_ENDIAN_BITFIELD)
        u8      reserved_flags2:2,
                version:2,
index 3ee3bf7c85262b034702875b7d356ac7595abea6..144de4d2f385e0b05233162ec425a6763262e038 100644 (file)
@@ -23,6 +23,7 @@
 #define _UAPI_LINUX_IF_ETHER_H
 
 #include <linux/types.h>
+#include <linux/libc-compat.h>
 
 /*
  *     IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble
  *     and FCS/CRC (frame check sequence).
  */

 /*
  *     This is an Ethernet frame header.
  */
 
+#if __UAPI_DEF_ETHHDR
 struct ethhdr {
        unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
        unsigned char   h_source[ETH_ALEN];     /* source ether addr    */
        __be16          h_proto;                /* packet type ID field */
 } __attribute__((packed));
+#endif
 
 
 #endif /* _UAPI_LINUX_IF_ETHER_H */
index 282875cf805657b41eb0b5a797aa77647b889159..fc29efaa918cb76fab92a65890cca1aab3ecf9ba 100644 (file)
 
 /* If we did not see any headers from any supported C libraries,
  * or we are being included in the kernel, then define everything
- * that we need. */
+ * that we need. Check for previous __UAPI_* definitions to give
+ * unsupported C libraries a way to opt out of any kernel definition. */
 #else /* !defined(__GLIBC__) */
 
 /* Definitions for if.h */
+#ifndef __UAPI_DEF_IF_IFCONF
 #define __UAPI_DEF_IF_IFCONF 1
+#endif
+#ifndef __UAPI_DEF_IF_IFMAP
 #define __UAPI_DEF_IF_IFMAP 1
+#endif
+#ifndef __UAPI_DEF_IF_IFNAMSIZ
 #define __UAPI_DEF_IF_IFNAMSIZ 1
+#endif
+#ifndef __UAPI_DEF_IF_IFREQ
 #define __UAPI_DEF_IF_IFREQ 1
+#endif
 /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+#endif
 /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif
 
 /* Definitions for in.h */
+#ifndef __UAPI_DEF_IN_ADDR
 #define __UAPI_DEF_IN_ADDR             1
+#endif
+#ifndef __UAPI_DEF_IN_IPPROTO
 #define __UAPI_DEF_IN_IPPROTO          1
+#endif
+#ifndef __UAPI_DEF_IN_PKTINFO
 #define __UAPI_DEF_IN_PKTINFO          1
+#endif
+#ifndef __UAPI_DEF_IP_MREQ
 #define __UAPI_DEF_IP_MREQ             1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN
 #define __UAPI_DEF_SOCKADDR_IN         1
+#endif
+#ifndef __UAPI_DEF_IN_CLASS
 #define __UAPI_DEF_IN_CLASS            1
+#endif
 
 /* Definitions for in6.h */
+#ifndef __UAPI_DEF_IN6_ADDR
 #define __UAPI_DEF_IN6_ADDR            1
+#endif
+#ifndef __UAPI_DEF_IN6_ADDR_ALT
 #define __UAPI_DEF_IN6_ADDR_ALT                1
+#endif
+#ifndef __UAPI_DEF_SOCKADDR_IN6
 #define __UAPI_DEF_SOCKADDR_IN6                1
+#endif
+#ifndef __UAPI_DEF_IPV6_MREQ
 #define __UAPI_DEF_IPV6_MREQ           1
+#endif
+#ifndef __UAPI_DEF_IPPROTO_V6
 #define __UAPI_DEF_IPPROTO_V6          1
+#endif
+#ifndef __UAPI_DEF_IPV6_OPTIONS
 #define __UAPI_DEF_IPV6_OPTIONS                1
+#endif
+#ifndef __UAPI_DEF_IN6_PKTINFO
 #define __UAPI_DEF_IN6_PKTINFO         1
+#endif
+#ifndef __UAPI_DEF_IP6_MTUINFO
 #define __UAPI_DEF_IP6_MTUINFO         1
+#endif
 
 /* Definitions for ipx.h */
+#ifndef __UAPI_DEF_SOCKADDR_IPX
 #define __UAPI_DEF_SOCKADDR_IPX                        1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
 #define __UAPI_DEF_IPX_ROUTE_DEFINITION                1
+#endif
+#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
 #define __UAPI_DEF_IPX_INTERFACE_DEFINITION    1
+#endif
+#ifndef __UAPI_DEF_IPX_CONFIG_DATA
 #define __UAPI_DEF_IPX_CONFIG_DATA             1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEF
 #define __UAPI_DEF_IPX_ROUTE_DEF               1
+#endif
 
 /* Definitions for xattr.h */
+#ifndef __UAPI_DEF_XATTR
 #define __UAPI_DEF_XATTR               1
+#endif
 
 #endif /* __GLIBC__ */
 
+/* Definitions for if_ether.h */
+/* Allow libcs like musl to deactivate this; glibc does not implement it. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR              1
+#endif
+
 #endif /* _UAPI_LIBC_COMPAT_H */
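How a libc that ships its own struct ethhdr might use the new guard (the consumer code is hypothetical):

#define __UAPI_DEF_ETHHDR 0     /* suppress the kernel definition */
#include <linux/if_ether.h>     /* ETH_ALEN and friends still provided */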
index 3fea7709a4412c5e3c7f94daed08576b55bf3bc2..57ccfb32e87f97bad06f7a263ceecc20cd74dc9d 100644 (file)
@@ -36,7 +36,7 @@ enum ip_conntrack_info {
 
 #define NF_CT_STATE_INVALID_BIT                        (1 << 0)
 #define NF_CT_STATE_BIT(ctinfo)                        (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
-#define NF_CT_STATE_UNTRACKED_BIT              (1 << (IP_CT_UNTRACKED + 1))
+#define NF_CT_STATE_UNTRACKED_BIT              (1 << 6)
 
 /* Bitset representing status of connection. */
 enum ip_conntrack_status {
index 690a381adee0d164f52c9984f61c0d710c54c089..19a6b845d834ba33b4a29be731b79cf63401861f 100644 (file)
@@ -1396,6 +1396,13 @@ config BPF_SYSCALL
          Enable the bpf() system call that allows to manipulate eBPF
          programs and maps via file descriptors.
 
+config BPF_JIT_ALWAYS_ON
+       bool "Permanently enable BPF JIT and remove BPF interpreter"
+       depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+       help
+         Enables BPF JIT and removes BPF interpreter to avoid
+         speculative execution of BPF instructions by the interpreter
+
 config USERFAULTFD
        bool "Enable userfaultfd() system call"
        select ANON_INODES
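With this option set the interpreter is compiled out entirely; the runtime consequence (per the core.c hunk further below):

/* Under BPF_JIT_ALWAYS_ON=y a program either JITs successfully or
 * bpf_prog_select_runtime() fails with -ENOTSUPP; the
 * net.core.bpf_jit_enable sysctl is effectively pinned to 1.
 */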
index 7c25426d3cf5699ffde545ee9a8d5e254f16f517..aaa319848e7d57422225aa99055b574dc904e36d 100644 (file)
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
+       u32 elem_size, index_mask, max_entries;
+       bool unpriv = !capable(CAP_SYS_ADMIN);
        struct bpf_array *array;
        u64 array_size;
-       u32 elem_size;
 
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        elem_size = round_up(attr->value_size, 8);
 
+       max_entries = attr->max_entries;
+       index_mask = roundup_pow_of_two(max_entries) - 1;
+
+       if (unpriv)
+               /* round up array size to nearest power of 2,
+                * since cpu will speculate within index_mask limits
+                */
+               max_entries = index_mask + 1;
+
        array_size = sizeof(*array);
        if (percpu)
-               array_size += (u64) attr->max_entries * sizeof(void *);
+               array_size += (u64) max_entries * sizeof(void *);
        else
-               array_size += (u64) attr->max_entries * elem_size;
+               array_size += (u64) max_entries * elem_size;
 
        /* make sure there is no u32 overflow later in round_up() */
        if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +96,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);
+       array->index_mask = index_mask;
+       array->map.unpriv_array = unpriv;
 
        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
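A minimal worked example of the masking scheme (numbers chosen for illustration):

/* max_entries = 5: roundup_pow_of_two(5) = 8, so index_mask = 7 and an
 * unprivileged map is allocated with 8 slots.  A speculative lookup with
 * index = 1000 then resolves to 1000 & 7 = 0, which stays inside the
 * allocation even if the index >= max_entries branch is bypassed by the CPU.
 */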
@@ -121,12 +133,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
        if (unlikely(index >= array->map.max_entries))
                return NULL;
 
-       return array->value + array->elem_size * index;
+       return array->value + array->elem_size * (index & array->index_mask);
 }
 
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
@@ -135,7 +148,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-       *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+       if (map->unpriv_array) {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+       } else {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+       }
 
        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
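Read back as C, the unprivileged variant of the emitted sequence is roughly:

/* if (index >= map->max_entries)
 *         return NULL;            // BPF_JGE jumps past the lookup
 * index &= array->index_mask;     // the extra BPF_AND bounds speculation
 * elem  = array->value + elem_size * index;
 */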
@@ -157,7 +175,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
        if (unlikely(index >= array->map.max_entries))
                return NULL;
 
-       return this_cpu_ptr(array->pptrs[index]);
+       return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +195,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
-       pptr = array->pptrs[index];
+       pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
@@ -225,10 +243,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                return -EEXIST;
 
        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
-               memcpy(this_cpu_ptr(array->pptrs[index]),
+               memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
-               memcpy(array->value + array->elem_size * index,
+               memcpy(array->value +
+                      array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
 }
@@ -262,7 +281,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
-       pptr = array->pptrs[index];
+       pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
@@ -613,6 +632,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
 static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
 {
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
@@ -621,7 +641,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
 
        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-       *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+       if (map->unpriv_array) {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+       } else {
+               *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+       }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
index 86b50aa26ee80adac9ba7ac52248c64cbea19b26..51ec2dda7f08c6c90af084589bb6d80662c77d12 100644 (file)
@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 EXPORT_SYMBOL_GPL(__bpf_call_base);
 
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
 /**
  *     __bpf_prog_run - run eBPF program on a given context
  *     @ctx: is the data we are operating on
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
 
+#else
+static unsigned int __bpf_prog_ret0(const void *ctx,
+                                   const struct bpf_insn *insn)
+{
+       return 0;
+}
+#endif
+
 bool bpf_prog_array_compatible(struct bpf_array *array,
                               const struct bpf_prog *fp)
 {
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
 
        fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+       fp->bpf_func = __bpf_prog_ret0;
+#endif
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
         */
        if (!bpf_prog_is_dev_bound(fp->aux)) {
                fp = bpf_int_jit_compile(fp);
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+               if (!fp->jited) {
+                       *err = -ENOTSUPP;
+                       return fp;
+               }
+#endif
        } else {
                *err = bpf_prog_offload_compile(fp);
                if (*err)
index 5ee2e41893d9662641001c1de8d0776fccb590aa..1712d319c2d84292a39cb5c04cd7e5459f6d8174 100644 (file)
@@ -591,8 +591,15 @@ static void sock_map_free(struct bpf_map *map)
 
                write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
-               smap_list_remove(psock, &stab->sock_map[i]);
-               smap_release_sock(psock, sock);
+               /* This check handles a racing sock event that can get the
+                * sk_callback_lock before this case but after xchg happens
+                * causing the refcnt to hit zero and sock user data (psock)
+                * to be null and queued for garbage collection.
+                */
+               if (likely(psock)) {
+                       smap_list_remove(psock, &stab->sock_map[i]);
+                       smap_release_sock(psock, sock);
+               }
                write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
index 04b24876cd23c83c9502afc60853c871ee3fee13..b414d6b2d47070505f3a60fcd8b58478b293d2ad 100644 (file)
@@ -1729,6 +1729,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
        if (err)
                return err;
+       if (func_id == BPF_FUNC_tail_call) {
+               if (meta.map_ptr == NULL) {
+                       verbose(env, "verifier bug\n");
+                       return -EINVAL;
+               }
+               env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
+       }
        err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
        if (err)
                return err;
@@ -4456,6 +4463,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         */
                        insn->imm = 0;
                        insn->code = BPF_JMP | BPF_TAIL_CALL;
+
+                       /* instead of changing every JIT dealing with tail_call
+                        * emit two extra insns:
+                        * if (index >= max_entries) goto out;
+                        * index &= array->index_mask;
+                        * to avoid out-of-bounds cpu speculation
+                        */
+                       map_ptr = env->insn_aux_data[i + delta].map_ptr;
+                       if (map_ptr == BPF_MAP_PTR_POISON) {
+                               verbose(env, "tail_call abusing map_ptr\n");
+                               return -EINVAL;
+                       }
+                       if (!map_ptr->unpriv_array)
+                               continue;
+                       insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
+                                                 map_ptr->max_entries, 2);
+                       insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
+                                                   container_of(map_ptr,
+                                                                struct bpf_array,
+                                                                map)->index_mask);
+                       insn_buf[2] = *insn;
+                       cnt = 3;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
                        continue;
                }
 
index 024085daab1aede5958235b0663c19ec667b5836..a2c05d2476ac5fd3d530ba64938cd85be52fece2 100644 (file)
@@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
         */
        do {
                css_task_iter_start(&from->self, 0, &it);
-               task = css_task_iter_next(&it);
+
+               do {
+                       task = css_task_iter_next(&it);
+               } while (task && (task->flags & PF_EXITING));
+
                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);
index 0b1ffe147f240c39726d79505c4e02e8fe40cd47..2cf06c274e4ca6cc66194f4ce0fcb9ad0ad82f88 100644 (file)
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
                         cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
                         cft->name);
        else
-               strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+               strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
        return buf;
 }
 
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
 
        root->flags = opts->flags;
        if (opts->release_agent)
-               strcpy(root->release_agent_path, opts->release_agent);
+               strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX);
        if (opts->name)
-               strcpy(root->name, opts->name);
+               strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
        if (opts->cpuset_clone_children)
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
 }
@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
 
 static void css_task_iter_advance(struct css_task_iter *it)
 {
-       struct list_head *l = it->task_pos;
+       struct list_head *next;
 
        lockdep_assert_held(&css_set_lock);
-       WARN_ON_ONCE(!l);
-
 repeat:
        /*
         * Advance iterator to find next entry.  cset->tasks is consumed
         * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
         * next cset.
         */
-       l = l->next;
+       next = it->task_pos->next;
 
-       if (l == it->tasks_head)
-               l = it->mg_tasks_head->next;
+       if (next == it->tasks_head)
+               next = it->mg_tasks_head->next;
 
-       if (l == it->mg_tasks_head)
+       if (next == it->mg_tasks_head)
                css_task_iter_advance_css_set(it);
        else
-               it->task_pos = l;
+               it->task_pos = next;
 
        /* if PROCS, skip over tasks which aren't group leaders */
        if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
index 9e97480892709957e127e9941710ce45f11ff724..f369889e521d7e7f1e237236651532e6a862e0f5 100644 (file)
@@ -6250,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
                                return NULL;
                        }
                }
-               /* We don't expect to fail. */
                if (*err) {
-                       pr_cont("FAIL to attach err=%d len=%d\n",
+                       pr_cont("FAIL to prog_create err=%d len=%d\n",
                                *err, fprog.len);
                        return NULL;
                }
@@ -6276,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
                 * checks.
                 */
                fp = bpf_prog_select_runtime(fp, err);
+               if (*err) {
+                       pr_cont("FAIL to select_runtime err=%d\n", *err);
+                       return NULL;
+               }
                break;
        }
 
@@ -6461,8 +6464,8 @@ static __init int test_bpf(void)
                                pass_cnt++;
                                continue;
                        }
-
-                       return err;
+                       err_cnt++;
+                       continue;
                }
 
                pr_cont("jited:%u ", fp->jited);
index 8dfdd94e430fd57f7bf6d1da86f18658a057e05d..bad01b14a4ad6b5d4e61ac5e0ed93022755223ab 100644 (file)
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_uninit_applicant(real_dev);
        }
 
-       /* Take it out of our own structures, but be sure to interlock with
-        * HW accelerating devices or SW vlan input packet processing if
-        * VLAN is not 0 (leave it there for 802.1p).
-        */
-       if (vlan_id)
-               vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+       vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
 
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
index 2d38b6e34203b7df5638c340126c2f5169fb3791..e0adcd123f48a1a4f66028bbf731132ed8e7ff17 100644 (file)
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        mutex_lock(&caifdevs->lock);
        list_add_rcu(&caifd->list, &caifdevs->list);
 
-       strncpy(caifd->layer.name, dev->name,
-               sizeof(caifd->layer.name) - 1);
-       caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+       strlcpy(caifd->layer.name, dev->name,
+               sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
        cfcnfg_add_phy_layer(cfg,
                                dev,
index 5cd44f001f6479a5e5ec9d02cfc417b899b05e9d..1a082a946045e53f1617cc9a814bae5dcb9784e7 100644 (file)
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
                dev_add_pack(&caif_usb_type);
        pack_added = true;
 
-       strncpy(layer->name, dev->name,
-                       sizeof(layer->name) - 1);
-       layer->name[sizeof(layer->name) - 1] = 0;
+       strlcpy(layer->name, dev->name, sizeof(layer->name));
 
        return 0;
 }
index 273cb07f57d87186224fb50943af949b99b3cde2..8f00bea093b942f8b50193def6d311d09e75b8eb 100644 (file)
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
        case CAIFPROTO_RFM:
                l->linktype = CFCTRL_SRV_RFM;
                l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
-               strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
-                       sizeof(l->u.rfm.volume)-1);
-               l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+               strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+                       sizeof(l->u.rfm.volume));
                break;
        case CAIFPROTO_UTIL:
                l->linktype = CFCTRL_SRV_UTIL;
                l->endpoint = 0x00;
                l->chtype = 0x00;
-               strncpy(l->u.utility.name, s->sockaddr.u.util.service,
-                       sizeof(l->u.utility.name)-1);
-               l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+               strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
+                       sizeof(l->u.utility.name));
                caif_assert(sizeof(l->u.utility.name) > 10);
                l->u.utility.paramlen = s->param.size;
                if (l->u.utility.paramlen > sizeof(l->u.utility.params))
index f5afda1abc76fe908e8cce160ad032870972424c..655ed7032150309062d0c8c85cda571d34030423 100644 (file)
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
                tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
                cfpkt_add_body(pkt, &tmp16, 2);
                memset(utility_name, 0, sizeof(utility_name));
-               strncpy(utility_name, param->u.utility.name,
-                       UTILITY_NAME_LENGTH - 1);
+               strlcpy(utility_name, param->u.utility.name,
+                       UTILITY_NAME_LENGTH);
                cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
                tmp8 = param->u.utility.paramlen;
                cfpkt_add_body(pkt, &tmp8, 1);
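The behavioural difference motivating these conversions, sketched with a tiny buffer (values illustrative):

char buf[4];

strncpy(buf, "abcdef", sizeof(buf));  /* buf = "abcd", no NUL terminator  */
strlcpy(buf, "abcdef", sizeof(buf));  /* buf = "abc\0", always terminated,
                                       * returns strlen("abcdef") == 6    */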
index 01ee854454a8089cdd49e2c8964a99f6a2d74730..0e0ba36eeac9852b8df5ddd398dbc66ad6c83760 100644 (file)
@@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name);
 int dev_get_valid_name(struct net *net, struct net_device *dev,
                       const char *name)
 {
-       return dev_alloc_name_ns(net, dev, name);
+       BUG_ON(!net);
+
+       if (!dev_valid_name(name))
+               return -EINVAL;
+
+       if (strchr(name, '%'))
+               return dev_alloc_name_ns(net, dev, name);
+       else if (__dev_get_by_name(net, name))
+               return -EEXIST;
+       else if (dev->name != name)
+               strlcpy(dev->name, name, IFNAMSIZ);
+
+       return 0;
 }
 EXPORT_SYMBOL(dev_get_valid_name);
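Illustrative behaviour of the restored checks (device names are examples):

/* dev_get_valid_name(net, dev, "eth%d")    -> template, expanded by
 *                                             dev_alloc_name_ns(), e.g. "eth0"
 * dev_get_valid_name(net, dev, "eth0")     -> -EEXIST if "eth0" is taken
 * dev_get_valid_name(net, dev, "bad/name") -> -EINVAL (fails dev_valid_name)
 */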
 
index f8fcf450a36e604fe4c91dcccb4fd2a4a35ef1df..8225416911aed6d95f91ed5cb98ef1fb843101bb 100644 (file)
@@ -770,15 +770,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
        return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
 }
 
-static void
-warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
-{
-       char name[sizeof(current->comm)];
-
-       pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
-                    get_task_comm(name, current), details);
-}
-
 /* Query device for its ethtool_cmd settings.
  *
  * Backward compatibility note: for compatibility with legacy ethtool,
@@ -805,10 +796,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
                                                           &link_ksettings);
                if (err < 0)
                        return err;
-               if (!convert_link_ksettings_to_legacy_settings(&cmd,
-                                                              &link_ksettings))
-                       warn_incomplete_ethtool_legacy_settings_conversion(
-                               "link modes are only partially reported");
+               convert_link_ksettings_to_legacy_settings(&cmd,
+                                                         &link_ksettings);
 
                /* send a sensible cmd tag back to user */
                cmd.cmd = ETHTOOL_GSET;
index 6a85e67fafce224b534dd87bb9407ae115f8ba7a..d339ef170df60282c8812b63148c479820b3008f 100644 (file)
@@ -1054,11 +1054,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
                 */
                goto out_err_free;
 
-       /* We are guaranteed to never error here with cBPF to eBPF
-        * transitions, since there's no issue with type compatibility
-        * checks on program arrays.
-        */
        fp = bpf_prog_select_runtime(fp, &err);
+       if (err)
+               goto out_err_free;
 
        kfree(old_prog);
        return fp;
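
The removed comment assumed cBPF-to-eBPF migration could never fail, but bpf_prog_select_runtime() reports failure through its &err out-parameter while still handing back a program pointer, and with BPF_JIT_ALWAYS_ON the JIT itself may now refuse a program. A generic sketch of the out-parameter contract being checked here, with a hypothetical select_runtime() standing in:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct prog { int jited; };

    /* hypothetical helper mirroring bpf_prog_select_runtime()'s contract:
     * it returns a (possibly replaced) program and reports failure only
     * through *err, so the returned pointer alone proves nothing */
    static struct prog *select_runtime(struct prog *p, int *err)
    {
        *err = p->jited ? 0 : -ENOTSUP;   /* e.g. the JIT refused it */
        return p;
    }

    int main(void)
    {
        struct prog *fp = calloc(1, sizeof(*fp));
        int err;

        fp = select_runtime(fp, &err);
        if (err) {                        /* the check this patch adds */
            free(fp);
            fprintf(stderr, "select_runtime failed: %d\n", err);
            return 1;
        }
        puts("program ready");
        free(fp);
        return 0;
    }
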
index dabba2a91fc8ff5852681dcee53149d99798ed79..778d7f03404a6631607f29c3267a0e39a40ac57c 100644 (file)
@@ -1681,18 +1681,18 @@ static bool link_dump_filtered(struct net_device *dev,
        return false;
 }
 
-static struct net *get_target_net(struct sk_buff *skb, int netnsid)
+static struct net *get_target_net(struct sock *sk, int netnsid)
 {
        struct net *net;
 
-       net = get_net_ns_by_id(sock_net(skb->sk), netnsid);
+       net = get_net_ns_by_id(sock_net(sk), netnsid);
        if (!net)
                return ERR_PTR(-EINVAL);
 
        /* For now, the caller is required to have CAP_NET_ADMIN in
         * the user namespace owning the target net ns.
         */
-       if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+       if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
                put_net(net);
                return ERR_PTR(-EACCES);
        }
@@ -1733,7 +1733,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        ifla_policy, NULL) >= 0) {
                if (tb[IFLA_IF_NETNSID]) {
                        netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
-                       tgt_net = get_target_net(skb, netnsid);
+                       tgt_net = get_target_net(skb->sk, netnsid);
                        if (IS_ERR(tgt_net)) {
                                tgt_net = net;
                                netnsid = -1;
@@ -2883,7 +2883,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (tb[IFLA_IF_NETNSID]) {
                netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
-               tgt_net = get_target_net(skb, netnsid);
+               tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
                if (IS_ERR(tgt_net))
                        return PTR_ERR(tgt_net);
        }
index 217f4e3b82f6edca841b66089ab0772bd8d391e7..146b50e30659daca3bdc4413d800890ef482f521 100644 (file)
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
        case SKNLGRP_INET6_UDP_DESTROY:
                if (!sock_diag_handlers[AF_INET6])
                        request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-                                      NETLINK_SOCK_DIAG, AF_INET);
+                                      NETLINK_SOCK_DIAG, AF_INET6);
                break;
        }
        return 0;
index cbc3dde4cfcccae89ba262bb032edbf13639321f..a47ad6cd41c0396558ee6666ae45e78f36d64bd5 100644 (file)
@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
                .data           = &bpf_jit_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
                .proc_handler   = proc_dointvec
+#else
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &one,
+#endif
        },
 # ifdef CONFIG_HAVE_EBPF_JIT
        {
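
With CONFIG_BPF_JIT_ALWAYS_ON the sysctl switches from proc_dointvec to proc_dointvec_minmax with both extra1 and extra2 pointing at the same constant, which clamps every write of bpf_jit_enable to exactly 1: the interpreter can no longer be re-enabled at runtime. The same min==max trick in a tiny standalone clamp, purely as an illustration:

    #include <stdio.h>

    static const int one = 1;

    /* minmax-style handler: accept a write only if it lands in [lo, hi];
     * with lo == hi == 1 the knob is effectively pinned */
    static int write_clamped(int *knob, int val, const int *lo, const int *hi)
    {
        if (val < *lo || val > *hi)
            return -1;          /* the kernel returns -EINVAL here */
        *knob = val;
        return 0;
    }

    int main(void)
    {
        int bpf_jit_enable = 1;

        printf("%d\n", write_clamped(&bpf_jit_enable, 0, &one, &one)); /* -1 */
        printf("%d\n", write_clamped(&bpf_jit_enable, 2, &one, &one)); /* -1 */
        printf("%d\n", write_clamped(&bpf_jit_enable, 1, &one, &one)); /*  0 */
        return 0;
    }
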
index 125c1eab3eaa6d894804c3aa8918aa7fcc736ca0..5e570aa9e43b77f5c9ccd267319f762200f732d2 100644 (file)
@@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                goto out;
 
        /* hdrincl should be READ_ONCE(inet->hdrincl)
-        * but READ_ONCE() doesn't work with bit fields
+        * but READ_ONCE() doesn't work with bit fields.
+        * Doing this indirectly yields the same result.
         */
        hdrincl = inet->hdrincl;
+       hdrincl = READ_ONCE(hdrincl);
        /*
         *      Check the flags.
         */
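
READ_ONCE() is built around a cast through a volatile pointer, and you cannot take the address of a bit-field, so READ_ONCE(inet->hdrincl) simply will not compile. Copying the bit-field into a plain local first and applying READ_ONCE() to that local gives the compiler one pinned load it cannot silently repeat later in the function. A userspace demonstration with a simplified READ_ONCE (this macro is a sketch, not the kernel's exact definition):

    #include <stdio.h>

    /* simplified model of the kernel's READ_ONCE(): force exactly one
     * load through a volatile lvalue */
    #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    struct inet_flags {
        unsigned int hdrincl:1;   /* bit-field: &flags.hdrincl is illegal */
        unsigned int other:31;
    };

    int main(void)
    {
        struct inet_flags inet = { .hdrincl = 1 };
        int hdrincl;

        /* READ_ONCE(inet.hdrincl) would not compile, since the address
         * of a bit-field cannot be taken; snapshot it into a plain int */
        hdrincl = inet.hdrincl;
        /* then pin that snapshot so the compiler cannot re-read the
         * struct field behind our back */
        hdrincl = READ_ONCE(hdrincl);

        printf("hdrincl=%d\n", hdrincl);
        return 0;
    }
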
index 83bd75713535cf8fadf348e5d452445aba9c426c..bc68eb661970a5404e6cfb060b0011680d104ed5 100644 (file)
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
        sr_phdr->segments[0] = **addr_p;
        *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
 
+       if (sr_ihdr->hdrlen > hops * 2) {
+               int tlvs_offset, tlvs_length;
+
+               tlvs_offset = (1 + hops * 2) << 3;
+               tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
+               memcpy((char *)sr_phdr + tlvs_offset,
+                      (char *)sr_ihdr + tlvs_offset, tlvs_length);
+       }
+
 #ifdef CONFIG_IPV6_SEG6_HMAC
        if (sr_has_hmac(sr_phdr)) {
                struct net *net = NULL;
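
The offset arithmetic in this hunk follows the segment routing header layout: hdrlen counts 8-octet units beyond the first 8 bytes, and each IPv6 segment address occupies two such units, so `hops * 2` units hold the segment list and anything past that is TLV data starting at byte `(1 + hops * 2) * 8` (the leading 1 covers the 8-byte fixed header the hdrlen field excludes). A worked check of the numbers:

    #include <stdio.h>

    int main(void)
    {
        int hops = 3;       /* segments in the SRH */
        int hdrlen = 8;     /* 8-octet units after the first 8 bytes */

        /* 3 segments = 6 units; hdrlen 8 > 6, so 2 units of TLVs follow */
        if (hdrlen > hops * 2) {
            int tlvs_offset = (1 + hops * 2) << 3;      /* 56 bytes in   */
            int tlvs_length = (hdrlen - hops * 2) << 3; /* 16 bytes long */

            printf("TLVs at byte %d, %d bytes long\n",
                   tlvs_offset, tlvs_length);
        }
        return 0;
    }
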
index f5285f4e1d08acb60d42fb6fd10a0c38a239324f..9dcc3924a97561d689a1fd8862742d9543339dd2 100644 (file)
@@ -640,6 +640,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
                        if (!(fn->fn_flags & RTN_RTINFO)) {
                                RCU_INIT_POINTER(fn->leaf, NULL);
                                rt6_release(leaf);
+                       /* remove null_entry in the root node */
+                       } else if (fn->fn_flags & RTN_TL_ROOT &&
+                                  rcu_access_pointer(fn->leaf) ==
+                                  net->ipv6.ip6_null_entry) {
+                               RCU_INIT_POINTER(fn->leaf, NULL);
                        }
 
                        return fn;
@@ -1241,23 +1246,28 @@ out:
                 * If fib6_add_1 has cleared the old leaf pointer in the
                 * super-tree leaf node we have to find a new one for it.
                 */
-               struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf,
-                                           lockdep_is_held(&table->tb6_lock));
-               if (pn != fn && pn_leaf == rt) {
-                       pn_leaf = NULL;
-                       RCU_INIT_POINTER(pn->leaf, NULL);
-                       atomic_dec(&rt->rt6i_ref);
-               }
-               if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
-                       pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
-#if RT6_DEBUG >= 2
-                       if (!pn_leaf) {
-                               WARN_ON(!pn_leaf);
-                               pn_leaf = info->nl_net->ipv6.ip6_null_entry;
+               if (pn != fn) {
+                       struct rt6_info *pn_leaf =
+                               rcu_dereference_protected(pn->leaf,
+                                   lockdep_is_held(&table->tb6_lock));
+                       if (pn_leaf == rt) {
+                               pn_leaf = NULL;
+                               RCU_INIT_POINTER(pn->leaf, NULL);
+                               atomic_dec(&rt->rt6i_ref);
                        }
+                       if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
+                               pn_leaf = fib6_find_prefix(info->nl_net, table,
+                                                          pn);
+#if RT6_DEBUG >= 2
+                               if (!pn_leaf) {
+                                       WARN_ON(!pn_leaf);
+                                       pn_leaf =
+                                           info->nl_net->ipv6.ip6_null_entry;
+                               }
 #endif
-                       atomic_inc(&pn_leaf->rt6i_ref);
-                       rcu_assign_pointer(pn->leaf, pn_leaf);
+                               atomic_inc(&pn_leaf->rt6i_ref);
+                               rcu_assign_pointer(pn->leaf, pn_leaf);
+                       }
                }
 #endif
                goto failure;
@@ -1265,13 +1275,17 @@ out:
        return err;
 
 failure:
-       /* fn->leaf could be NULL if fn is an intermediate node and we
-        * failed to add the new route to it in both subtree creation
-        * failure and fib6_add_rt2node() failure case.
-        * In both cases, fib6_repair_tree() should be called to fix
-        * fn->leaf.
+       /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
+        * 1. fn is an intermediate node and we failed to add the new
+        * route to it in both subtree creation failure and fib6_add_rt2node()
+        * failure case.
+        * 2. fn is the root node in the table and we fail to add the first
+        * default route to it.
         */
-       if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
+       if (fn &&
+           (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
+            (fn->fn_flags & RTN_TL_ROOT &&
+             !rcu_access_pointer(fn->leaf))))
                fib6_repair_tree(info->nl_net, table, fn);
        /* Always release dst as dst->__refcnt is guaranteed
         * to be taken before entering this function
@@ -1526,6 +1540,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
        struct fib6_walker *w;
        int iter = 0;
 
+       /* Set fn->leaf to null_entry for root node. */
+       if (fn->fn_flags & RTN_TL_ROOT) {
+               rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
+               return fn;
+       }
+
        for (;;) {
                struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
                                            lockdep_is_held(&table->tb6_lock));
@@ -1680,10 +1700,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
        }
        read_unlock(&net->ipv6.fib6_walker_lock);
 
-       /* If it was last route, expunge its radix tree node */
+       /* If it was last route, call fib6_repair_tree() to:
+        * 1. For root node, put back null_entry as how the table was created.
+        * 2. For other nodes, expunge its radix tree node.
+        */
        if (!rcu_access_pointer(fn->leaf)) {
-               fn->fn_flags &= ~RTN_RTINFO;
-               net->ipv6.rt6_stats->fib_route_nodes--;
+               if (!(fn->fn_flags & RTN_TL_ROOT)) {
+                       fn->fn_flags &= ~RTN_RTINFO;
+                       net->ipv6.rt6_stats->fib_route_nodes--;
+               }
                fn = fib6_repair_tree(net, table, fn);
        }
 
index f7dd51c4231415fd1321fd431194d896ea2d1689..688ba5f7516b37c87b879036dce781bdcfa01739 100644 (file)
@@ -1735,9 +1735,10 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
        cork.base.opt = NULL;
        v6_cork.opt = NULL;
        err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
-       if (err)
+       if (err) {
+               ip6_cork_release(&cork, &v6_cork);
                return ERR_PTR(err);
-
+       }
        if (ipc6->dontfrag < 0)
                ipc6->dontfrag = inet6_sk(sk)->dontfrag;
 
index 931c38f6ff4a42fb17cf129cf6035706a24176dc..9a7cf355bc8c8ff67388456060c1f7e67d8762ee 100644 (file)
@@ -1074,10 +1074,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                        memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
                        neigh_release(neigh);
                }
-       } else if (!(t->parms.flags &
-                    (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
-               /* enable the cache only only if the routing decision does
-                * not depend on the current inner header value
+       } else if (t->parms.proto != 0 && !(t->parms.flags &
+                                           (IP6_TNL_F_USE_ORIG_TCLASS |
+                                            IP6_TNL_F_USE_ORIG_FWMARK))) {
+               /* enable the cache only if neither the outer protocol nor the
+                * routing decision depends on the current inner header value
                 */
                use_cache = true;
        }
@@ -1676,11 +1677,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct ip6_tnl *tnl = netdev_priv(dev);
 
-       if (tnl->parms.proto == IPPROTO_IPIP) {
-               if (new_mtu < ETH_MIN_MTU)
+       if (tnl->parms.proto == IPPROTO_IPV6) {
+               if (new_mtu < IPV6_MIN_MTU)
                        return -EINVAL;
        } else {
-               if (new_mtu < IPV6_MIN_MTU)
+               if (new_mtu < ETH_MIN_MTU)
                        return -EINVAL;
        }
        if (new_mtu > 0xFFF8 - dev->hard_header_len)
index 70e9d2ca8bbec176edac094268247bb125e3cbdc..4daafb07602f5f9727af7bec4ea7603901438b63 100644 (file)
@@ -3632,6 +3632,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                }
                return true;
        case NL80211_IFTYPE_MESH_POINT:
+               if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
+                       return false;
                if (multicast)
                        return true;
                return ether_addr_equal(sdata->vif.addr, hdr->addr1);
index 10798b35748180746266aa0a84187dfd7f0f1ceb..07bd4138c84ef250decc00a5c70df84298124a3a 100644 (file)
@@ -2072,7 +2072,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
                                continue;
 
                        list_for_each_entry_rcu(chain, &table->chains, list) {
-                               if (ctx && ctx->chain[0] &&
+                               if (ctx && ctx->chain &&
                                    strcmp(ctx->chain, chain->name) != 0)
                                        continue;
 
@@ -4665,8 +4665,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
 {
        struct nft_obj_filter *filter = cb->data;
 
-       kfree(filter->table);
-       kfree(filter);
+       if (filter) {
+               kfree(filter->table);
+               kfree(filter);
+       }
 
        return 0;
 }
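
kfree(NULL) is a harmless no-op in the kernel, but `filter->table` dereferences the pointer before kfree() ever sees it, so the dump-done callback oopses whenever it runs without a filter installed. Hence the guard on `filter` itself rather than on its members. The same distinction in plain C with free():

    #include <stdlib.h>

    struct obj_filter {
        char *table;
    };

    static void dump_done(struct obj_filter *filter)
    {
        /* free(NULL) is fine, but filter->table is a dereference:
         * it must only be reached when filter itself is non-NULL */
        if (filter) {
            free(filter->table);
            free(filter);
        }
    }

    int main(void)
    {
        dump_done(NULL);    /* would crash without the guard */

        struct obj_filter *f = calloc(1, sizeof(*f));
        dump_done(f);       /* table may itself be NULL; free(NULL) is ok */
        return 0;
    }
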
index bc2f1e0977d657ec09f176cfcecf28839eb1fab0..634cfcb7bba6833bde376706947c99f1cb103199 100644 (file)
@@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
 
        local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
 
+       if (args->nr_local == 0)
+               return -EINVAL;
+
        /* figure out the number of pages in the vector */
        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
@@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
 err:
        if (page)
                put_page(page);
+       rm->atomic.op_active = 0;
        kfree(rm->atomic.op_notifier);
 
        return ret;
index e29a48ef7fc348aefdc720cf08d1509ac5b53559..a0ac42b3ed0652fe897d59bd043c189af4fa0a12 100644 (file)
@@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        if (action == TC_ACT_SHOT)
                this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
 
-       tm->lastuse = lastuse;
+       tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
index 8b3e593884803370ea27ef5fe770fa5ae829a888..08b61849c2a2f448fa0a29b0c93037ebbbafd0e7 100644 (file)
@@ -239,7 +239,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        struct tcf_t *tm = &m->tcf_tm;
 
        _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
-       tm->lastuse = lastuse;
+       tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
index 621b5ca3fd1c17c3d7ef7bb1c7677ab98cebbe77..141c9c466ec172ff67930cf38eb02a49e01a12ab 100644 (file)
@@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                return;
        }
 
-       if (t->param_flags & SPP_PMTUD_ENABLE) {
-               /* Update transports view of the MTU */
-               sctp_transport_update_pmtu(t, pmtu);
-
-               /* Update association pmtu. */
-               sctp_assoc_sync_pmtu(asoc);
-       }
+       if (!(t->param_flags & SPP_PMTUD_ENABLE))
+               /* We can't allow retransmitting in such case, as the
+                * retransmission would be sized just as before, and thus we
+                * would get another icmp, and retransmit again.
+                */
+               return;
 
-       /* Retransmit with the new pmtu setting.
-        * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
-        * Needed will never be sent, but if a message was sent before
-        * PMTU discovery was disabled that was larger than the PMTU, it
-        * would not be fragmented, so it must be re-transmitted fragmented.
+       /* Update transports view of the MTU. Return if no update was needed.
+        * If an update wasn't needed/possible, it also doesn't make sense to
+        * try to retransmit now.
         */
+       if (!sctp_transport_update_pmtu(t, pmtu))
+               return;
+
+       /* Update association pmtu. */
+       sctp_assoc_sync_pmtu(asoc);
+
+       /* Retransmit with the new pmtu setting. */
        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
 }
 
index b4fb6e4886d264f302fc67b8d822cd8bdb5f3f41..9b01e994f66108a12abc31f452482f1de8749d84 100644 (file)
@@ -2277,7 +2277,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 
                if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
                        event = sctp_ulpevent_make_sender_dry_event(asoc,
-                                       GFP_ATOMIC);
+                                       GFP_USER | __GFP_NOWARN);
                        if (!event)
                                return -ENOMEM;
 
@@ -3498,6 +3498,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 
        if (optlen < sizeof(struct sctp_hmacalgo))
                return -EINVAL;
+       optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
+                                            SCTP_AUTH_NUM_HMACS * sizeof(u16));
 
        hmacs = memdup_user(optval, optlen);
        if (IS_ERR(hmacs))
@@ -3536,6 +3538,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
        if (optlen <= sizeof(struct sctp_authkey))
                return -EINVAL;
+       /* authkey->sca_keylength is u16, so optlen can't be bigger than
+        * this.
+        */
+       optlen = min_t(unsigned int, optlen, USHRT_MAX +
+                                            sizeof(struct sctp_authkey));
 
        authkey = memdup_user(optval, optlen);
        if (IS_ERR(authkey))
@@ -3893,6 +3900,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
 
        if (optlen < sizeof(*params))
                return -EINVAL;
+       /* srs_number_streams is u16, so optlen can't be bigger than this. */
+       optlen = min_t(unsigned int, optlen, USHRT_MAX +
+                                            sizeof(__u16) * sizeof(*params));
 
        params = memdup_user(optval, optlen);
        if (IS_ERR(params))
@@ -5015,7 +5025,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
        len = sizeof(int);
        if (put_user(len, optlen))
                return -EFAULT;
-       if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
+       if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
                return -EFAULT;
        return 0;
 }
@@ -5645,6 +5655,9 @@ copy_getaddrs:
                err = -EFAULT;
                goto out;
        }
+       /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
+        * but we can't change it anymore.
+        */
        if (put_user(bytes_copied, optlen))
                err = -EFAULT;
 out:
@@ -6081,7 +6094,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
                params.assoc_id = 0;
        } else if (len >= sizeof(struct sctp_assoc_value)) {
                len = sizeof(struct sctp_assoc_value);
-               if (copy_from_user(&params, optval, sizeof(params)))
+               if (copy_from_user(&params, optval, len))
                        return -EFAULT;
        } else
                return -EINVAL;
@@ -6251,7 +6264,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 
        if (len < sizeof(struct sctp_authkeyid))
                return -EINVAL;
-       if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
+
+       len = sizeof(struct sctp_authkeyid);
+       if (copy_from_user(&val, optval, len))
                return -EFAULT;
 
        asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6263,7 +6278,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
        else
                val.scact_keynumber = ep->active_key_id;
 
-       len = sizeof(struct sctp_authkeyid);
        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
@@ -6289,7 +6303,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        if (len < sizeof(struct sctp_authchunks))
                return -EINVAL;
 
-       if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+       if (copy_from_user(&val, optval, sizeof(val)))
                return -EFAULT;
 
        to = p->gauth_chunks;
@@ -6334,7 +6348,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        if (len < sizeof(struct sctp_authchunks))
                return -EINVAL;
 
-       if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+       if (copy_from_user(&val, optval, sizeof(val)))
                return -EFAULT;
 
        to = p->gauth_chunks;
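
These sctp hunks share one theme: a user-supplied optlen flows into memdup_user(), so an unchecked value lets userspace request an arbitrarily large kernel allocation. Because each option's real payload is bounded by a u16 count or a fixed table, min_t() caps the copy at the largest size the option can meaningfully carry, and the copy_from_user/copy_to_user calls are made to use the validated length instead of re-deriving it. A userspace sketch of a bounded copy-in (bounded_dup() is a hypothetical stand-in for memdup_user()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    /* hypothetical stand-in for memdup_user(): duplicate len bytes */
    static void *bounded_dup(const void *src, size_t len, size_t max)
    {
        len = min_t(size_t, len, max);    /* the clamp these patches add */
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        char payload[64] = "key material";
        /* caller claims a huge optlen; only the clamped size is used */
        void *copy = bounded_dup(payload, 1u << 30, sizeof(payload));

        printf("%s\n", copy ? (char *)copy : "alloc failed");
        free(copy);
        return 0;
    }
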
index 76ea66be0bbee7d3f018676d52c8b95ba06dbcb1..524dfeb94c41ab1ac735746a8acf93af1c96ae48 100644 (file)
@@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
        sctp_stream_outq_migrate(stream, NULL, outcnt);
        sched->sched_all(stream);
 
-       i = sctp_stream_alloc_out(stream, outcnt, gfp);
-       if (i)
-               return i;
+       ret = sctp_stream_alloc_out(stream, outcnt, gfp);
+       if (ret)
+               goto out;
 
        stream->outcnt = outcnt;
        for (i = 0; i < stream->outcnt; i++)
@@ -170,19 +170,17 @@ in:
        if (!incnt)
                goto out;
 
-       i = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (i) {
-               ret = -ENOMEM;
-               goto free;
+       ret = sctp_stream_alloc_in(stream, incnt, gfp);
+       if (ret) {
+               sched->free(stream);
+               kfree(stream->out);
+               stream->out = NULL;
+               stream->outcnt = 0;
+               goto out;
        }
 
        stream->incnt = incnt;
-       goto out;
 
-free:
-       sched->free(stream);
-       kfree(stream->out);
-       stream->out = NULL;
 out:
        return ret;
 }
index 1e5a22430cf56e40a6f323081beb97836b506384..47f82bd794d915188bad037463c2aa14175a55ef 100644 (file)
@@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
        struct dst_entry *dst = sctp_transport_dst_check(t);
+       bool change = true;
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
-               pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
-                       __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
-               /* Use default minimum segment size and disable
-                * pmtu discovery on this transport.
-                */
-               t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
-       } else {
-               t->pathmtu = pmtu;
+               pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
+                                   __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
+               /* Use default minimum segment instead */
+               pmtu = SCTP_DEFAULT_MINSEGMENT;
        }
+       pmtu = SCTP_TRUNC4(pmtu);
 
        if (dst) {
                dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
                dst = sctp_transport_dst_check(t);
        }
 
-       if (!dst)
+       if (!dst) {
                t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+               dst = t->dst;
+       }
+
+       if (dst) {
+               /* Re-fetch, as under layers may have a higher minimum size */
+               pmtu = SCTP_TRUNC4(dst_mtu(dst));
+               change = t->pathmtu != pmtu;
+       }
+       t->pathmtu = pmtu;
+
+       return change;
 }
 
 /* Caches the dst entry and source address for a transport's destination
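
sctp_transport_update_pmtu() now returns whether the path MTU actually changed, so the ICMP handler earlier in this series can skip the retransmit when the update was a no-op; otherwise every spurious "frag needed" would trigger a pointless retransmission and possibly another ICMP. A minimal model of the return-changed pattern, with stand-ins for the SCTP constants:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_SEG 512               /* stand-in for SCTP_DEFAULT_MINSEGMENT */
    #define TRUNC4(x) ((x) & ~3u)     /* stand-in for SCTP_TRUNC4() */

    static unsigned int pathmtu = 1500;

    static bool update_pmtu(unsigned int pmtu)
    {
        if (pmtu < MIN_SEG)
            pmtu = MIN_SEG;           /* never go below the floor */
        pmtu = TRUNC4(pmtu);

        bool changed = pathmtu != pmtu;  /* tell the caller if anything moved */
        pathmtu = pmtu;
        return changed;
    }

    int main(void)
    {
        printf("%d\n", update_pmtu(1400));  /* 1: retransmit with new mtu */
        printf("%d\n", update_pmtu(1400));  /* 0: nothing to do, no rtx   */
        return 0;
    }
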
index 05f361faec451cdd69168dd5e2cd7c2ca7c8f7fb..6f05d5c4bf30759ea69944bd212891b9ce49469e 100644 (file)
@@ -436,8 +436,10 @@ static int sock_map_fd(struct socket *sock, int flags)
 {
        struct file *newfile;
        int fd = get_unused_fd_flags(flags);
-       if (unlikely(fd < 0))
+       if (unlikely(fd < 0)) {
+               sock_release(sock);
                return fd;
+       }
 
        newfile = sock_alloc_file(sock, flags, NULL);
        if (likely(!IS_ERR(newfile))) {
@@ -2619,6 +2621,15 @@ out_fs:
 
 core_initcall(sock_init);      /* early initcall */
 
+static int __init jit_init(void)
+{
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+       bpf_jit_enable = 1;
+#endif
+       return 0;
+}
+pure_initcall(jit_init);
+
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
index 8e12ab55346b0cf45b9244c470b36c5f81db30d2..5f4ffae807eee899a51ab79c258d3b26a8c863d9 100644 (file)
@@ -109,7 +109,8 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 static void tipc_group_decr_active(struct tipc_group *grp,
                                   struct tipc_member *m)
 {
-       if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING)
+       if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
+           m->state == MBR_REMITTED)
                grp->active_cnt--;
 }
 
@@ -562,7 +563,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
        int max_active = grp->max_active;
        int reclaim_limit = max_active * 3 / 4;
        int active_cnt = grp->active_cnt;
-       struct tipc_member *m, *rm;
+       struct tipc_member *m, *rm, *pm;
 
        m = tipc_group_find_member(grp, node, port);
        if (!m)
@@ -605,6 +606,17 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
                        pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
                        tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
                }
+               grp->active_cnt--;
+               list_del_init(&m->list);
+               if (list_empty(&grp->pending))
+                       return;
+
+               /* Set oldest pending member to active and advertise */
+               pm = list_first_entry(&grp->pending, struct tipc_member, list);
+               pm->state = MBR_ACTIVE;
+               list_move_tail(&pm->list, &grp->active);
+               grp->active_cnt++;
+               tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
                break;
        case MBR_RECLAIMING:
        case MBR_DISCOVERED:
@@ -742,14 +754,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                if (!m || m->state != MBR_RECLAIMING)
                        return;
 
-               list_del_init(&m->list);
-               grp->active_cnt--;
                remitted = msg_grp_remitted(hdr);
 
                /* Messages preceding the REMIT still in receive queue */
                if (m->advertised > remitted) {
                        m->state = MBR_REMITTED;
                        in_flight = m->advertised - remitted;
+                       m->advertised = ADV_IDLE + in_flight;
+                       return;
                }
                /* All messages preceding the REMIT have been read */
                if (m->advertised <= remitted) {
@@ -761,6 +773,8 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                        tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
 
                m->advertised = ADV_IDLE + in_flight;
+               grp->active_cnt--;
+               list_del_init(&m->list);
 
                /* Set oldest pending member to active and advertise */
                if (list_empty(&grp->pending))
index 213d0c498c97d78b17c81d1fd8b850c8768f7057..2b3dbcd40e46390debf84f4a127b23f185e76b39 100644 (file)
@@ -11361,7 +11361,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
                break;
        case NL80211_NAN_FUNC_FOLLOW_UP:
                if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
-                   !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
+                   !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
+                   !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
                        err = -EINVAL;
                        goto out;
                }
index e49f448ee04f4af00a7c7c00755f8a29b226bb6f..c2db7e905f7d69b4f9b153c8a72b0f463c3d4287 100644 (file)
@@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
                v = snd_pcm_hw_param_last(pcm, params, var, dir);
        else
                v = snd_pcm_hw_param_first(pcm, params, var, dir);
-       snd_BUG_ON(v < 0);
        return v;
 }
 
@@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
 
        if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
                return tmp;
-       mutex_lock(&runtime->oss.params_lock);
        while (bytes > 0) {
+               if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
                if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
                        tmp = bytes;
                        if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
@@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
                        xfer += tmp;
                        if ((substream->f_flags & O_NONBLOCK) != 0 &&
                            tmp != runtime->oss.period_bytes)
-                               break;
+                               tmp = -EAGAIN;
                }
-       }
-       mutex_unlock(&runtime->oss.params_lock);
-       return xfer;
-
  err:
-       mutex_unlock(&runtime->oss.params_lock);
+               mutex_unlock(&runtime->oss.params_lock);
+               if (tmp < 0)
+                       break;
+               if (signal_pending(current)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
+               tmp = 0;
+       }
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
@@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
 
        if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
                return tmp;
-       mutex_lock(&runtime->oss.params_lock);
        while (bytes > 0) {
+               if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
                if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
                        if (runtime->oss.buffer_used == 0) {
                                tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
@@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
                        bytes -= tmp;
                        xfer += tmp;
                }
-       }
-       mutex_unlock(&runtime->oss.params_lock);
-       return xfer;
-
  err:
-       mutex_unlock(&runtime->oss.params_lock);
+               mutex_unlock(&runtime->oss.params_lock);
+               if (tmp < 0)
+                       break;
+               if (signal_pending(current)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
+               tmp = 0;
+       }
        return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
 }
 
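The OSS read/write loops above used to hold params_lock across the entire transfer, so a blocked caller could stall anyone reconfiguring the stream and could not be interrupted by a signal. The rewrite takes the lock once per period with mutex_lock_interruptible() and checks for pending signals between iterations. A rough pthread analogue of the per-iteration pattern (pthreads has no interruptible lock, so a flag models the signal_pending() check; this is a sketch of the shape, not the driver code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;
    static volatile bool signalled;      /* models signal_pending(current) */

    static long copy_period(long bytes)  /* models one period transfer */
    {
        return bytes > 4096 ? 4096 : bytes;
    }

    static long write_loop(long bytes)
    {
        long xfer = 0;

        while (bytes > 0) {
            /* take the lock per period, not around the whole loop,
             * so reconfiguration and signals get a chance in between */
            pthread_mutex_lock(&params_lock);
            long tmp = copy_period(bytes);
            bytes -= tmp;
            xfer += tmp;
            pthread_mutex_unlock(&params_lock);

            if (signalled)               /* kernel: return -ERESTARTSYS */
                break;
        }
        return xfer;
    }

    int main(void)
    {
        printf("wrote %ld bytes\n", write_loop(10000));
        return 0;
    }
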
index cadc937928683cb3abcf7ec8208e8159df31c00f..85a56af104bd619160ace33f51ad2786eca3279a 100644 (file)
@@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
        snd_pcm_sframes_t frames = size;
 
        plugin = snd_pcm_plug_first(plug);
-       while (plugin && frames > 0) {
+       while (plugin) {
+               if (frames <= 0)
+                       return frames;
                if ((next = plugin->next) != NULL) {
                        snd_pcm_sframes_t frames1 = frames;
-                       if (plugin->dst_frames)
+                       if (plugin->dst_frames) {
                                frames1 = plugin->dst_frames(plugin, frames);
+                               if (frames1 <= 0)
+                                       return frames1;
+                       }
                        if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
                                return err;
                        }
                        if (err != frames1) {
                                frames = err;
-                               if (plugin->src_frames)
+                               if (plugin->src_frames) {
                                        frames = plugin->src_frames(plugin, frames1);
+                                       if (frames <= 0)
+                                               return frames;
+                               }
                        }
                } else
                        dst_channels = NULL;
index 10e7ef7a8804b1ba0d88b6a0c4ad478f38a9397e..db7894bb028ccc6ca941c45c4a7e37602ec1228d 100644 (file)
@@ -1632,7 +1632,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
                return changed;
        if (params->rmask) {
                int err = snd_pcm_hw_refine(pcm, params);
-               if (snd_BUG_ON(err < 0))
+               if (err < 0)
                        return err;
        }
        return snd_pcm_hw_param_value(params, var, dir);
@@ -1678,7 +1678,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
                return changed;
        if (params->rmask) {
                int err = snd_pcm_hw_refine(pcm, params);
-               if (snd_BUG_ON(err < 0))
+               if (err < 0)
                        return err;
        }
        return snd_pcm_hw_param_value(params, var, dir);
index a4d92e46c459b014287b729f9fe6b9d9e5c19ebf..f08772568c1709ab560d7ed6db1bf418b08c984c 100644 (file)
@@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
        return ret < 0 ? ret : frames;
 }
 
-/* decrease the appl_ptr; returns the processed frames or a negative error */
+/* decrease the appl_ptr; returns the processed frames or zero for error */
 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
                                         snd_pcm_uframes_t frames,
                                         snd_pcm_sframes_t avail)
@@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
        if (appl_ptr < 0)
                appl_ptr += runtime->boundary;
        ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
-       return ret < 0 ? ret : frames;
+       /* NOTE: we return zero for errors because PulseAudio gets depressed
+        * upon receiving an error from rewind ioctl and stops processing
+        * any longer.  Returning zero means that no rewind is done, so
+        * it's not absolutely wrong to answer like that.
+        */
+       return ret < 0 ? 0 : frames;
 }
 
 static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
index afac886ffa28a7c7cef39bb3c4f5d1ba816e890a..0333143a1fa7ad646c4da4b25eb653b2d3a1a3fb 100644 (file)
@@ -39,6 +39,7 @@
 #include <sound/core.h>
 #include <sound/control.h>
 #include <sound/pcm.h>
+#include <sound/pcm_params.h>
 #include <sound/info.h>
 #include <sound/initval.h>
 
@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
        return 0;
 }
 
-static void params_change_substream(struct loopback_pcm *dpcm,
-                                   struct snd_pcm_runtime *runtime)
-{
-       struct snd_pcm_runtime *dst_runtime;
-
-       if (dpcm == NULL || dpcm->substream == NULL)
-               return;
-       dst_runtime = dpcm->substream->runtime;
-       if (dst_runtime == NULL)
-               return;
-       dst_runtime->hw = dpcm->cable->hw;
-}
-
 static void params_change(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
        cable->hw.rate_max = runtime->rate;
        cable->hw.channels_min = runtime->channels;
        cable->hw.channels_max = runtime->channels;
-       params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
-                               runtime);
-       params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
-                               runtime);
 }
 
 static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
 static int rule_format(struct snd_pcm_hw_params *params,
                       struct snd_pcm_hw_rule *rule)
 {
+       struct loopback_pcm *dpcm = rule->private;
+       struct loopback_cable *cable = dpcm->cable;
+       struct snd_mask m;
 
-       struct snd_pcm_hardware *hw = rule->private;
-       struct snd_mask *maskp = hw_param_mask(params, rule->var);
-
-       maskp->bits[0] &= (u_int32_t)hw->formats;
-       maskp->bits[1] &= (u_int32_t)(hw->formats >> 32);
-       memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
-       if (! maskp->bits[0] && ! maskp->bits[1])
-               return -EINVAL;
-       return 0;
+       snd_mask_none(&m);
+       mutex_lock(&dpcm->loopback->cable_lock);
+       m.bits[0] = (u_int32_t)cable->hw.formats;
+       m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
+       mutex_unlock(&dpcm->loopback->cable_lock);
+       return snd_mask_refine(hw_param_mask(params, rule->var), &m);
 }
 
 static int rule_rate(struct snd_pcm_hw_params *params,
                     struct snd_pcm_hw_rule *rule)
 {
-       struct snd_pcm_hardware *hw = rule->private;
+       struct loopback_pcm *dpcm = rule->private;
+       struct loopback_cable *cable = dpcm->cable;
        struct snd_interval t;
 
-        t.min = hw->rate_min;
-        t.max = hw->rate_max;
+       mutex_lock(&dpcm->loopback->cable_lock);
+       t.min = cable->hw.rate_min;
+       t.max = cable->hw.rate_max;
+       mutex_unlock(&dpcm->loopback->cable_lock);
         t.openmin = t.openmax = 0;
         t.integer = 0;
        return snd_interval_refine(hw_param_interval(params, rule->var), &t);
@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
 static int rule_channels(struct snd_pcm_hw_params *params,
                         struct snd_pcm_hw_rule *rule)
 {
-       struct snd_pcm_hardware *hw = rule->private;
+       struct loopback_pcm *dpcm = rule->private;
+       struct loopback_cable *cable = dpcm->cable;
        struct snd_interval t;
 
-        t.min = hw->channels_min;
-        t.max = hw->channels_max;
+       mutex_lock(&dpcm->loopback->cable_lock);
+       t.min = cable->hw.channels_min;
+       t.max = cable->hw.channels_max;
+       mutex_unlock(&dpcm->loopback->cable_lock);
         t.openmin = t.openmax = 0;
         t.integer = 0;
        return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 }
 
+static void free_cable(struct snd_pcm_substream *substream)
+{
+       struct loopback *loopback = substream->private_data;
+       int dev = get_cable_index(substream);
+       struct loopback_cable *cable;
+
+       cable = loopback->cables[substream->number][dev];
+       if (!cable)
+               return;
+       if (cable->streams[!substream->stream]) {
+               /* other stream is still alive */
+               cable->streams[substream->stream] = NULL;
+       } else {
+               /* free the cable */
+               loopback->cables[substream->number][dev] = NULL;
+               kfree(cable);
+       }
+}
+
 static int loopback_open(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct loopback *loopback = substream->private_data;
        struct loopback_pcm *dpcm;
-       struct loopback_cable *cable;
+       struct loopback_cable *cable = NULL;
        int err = 0;
        int dev = get_cable_index(substream);
 
@@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
        if (!cable) {
                cable = kzalloc(sizeof(*cable), GFP_KERNEL);
                if (!cable) {
-                       kfree(dpcm);
                        err = -ENOMEM;
                        goto unlock;
                }
@@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
        /* are cached -> they do not reflect the actual state */
        err = snd_pcm_hw_rule_add(runtime, 0,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
-                                 rule_format, &runtime->hw,
+                                 rule_format, dpcm,
                                  SNDRV_PCM_HW_PARAM_FORMAT, -1);
        if (err < 0)
                goto unlock;
        err = snd_pcm_hw_rule_add(runtime, 0,
                                  SNDRV_PCM_HW_PARAM_RATE,
-                                 rule_rate, &runtime->hw,
+                                 rule_rate, dpcm,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto unlock;
        err = snd_pcm_hw_rule_add(runtime, 0,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
-                                 rule_channels, &runtime->hw,
+                                 rule_channels, dpcm,
                                  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
        if (err < 0)
                goto unlock;
@@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
        else
                runtime->hw = cable->hw;
  unlock:
+       if (err < 0) {
+               free_cable(substream);
+               kfree(dpcm);
+       }
        mutex_unlock(&loopback->cable_lock);
        return err;
 }
@@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
 {
        struct loopback *loopback = substream->private_data;
        struct loopback_pcm *dpcm = substream->runtime->private_data;
-       struct loopback_cable *cable;
-       int dev = get_cable_index(substream);
 
        loopback_timer_stop(dpcm);
        mutex_lock(&loopback->cable_lock);
-       cable = loopback->cables[substream->number][dev];
-       if (cable->streams[!substream->stream]) {
-               /* other stream is still alive */
-               cable->streams[substream->stream] = NULL;
-       } else {
-               /* free the cable */
-               loopback->cables[substream->number][dev] = NULL;
-               kfree(cable);
-       }
+       free_cable(substream);
        mutex_unlock(&loopback->cable_lock);
        return 0;
 }
index 8591c89c0828a9ff780c63f4f83f3366f79e198f..471bbbdb94db75ece343f6a5191b7501dea1ee85 100644 (file)
@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
                .result = REJECT,
                .matches = {
                        {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-                       /* ptr & 0x40 == either 0 or 0x40 */
-                       {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
-                       /* ptr << 2 == unknown, (4n) */
-                       {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
-                       /* (4n) + 14 == (4n+2).  We blow our bounds, because
-                        * the add could overflow.
-                        */
-                       {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
-                       /* Checked s>=0 */
-                       {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-                       /* packet pointer + nonnegative (4n+2) */
-                       {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-                       {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-                       /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
-                        * We checked the bounds, but it might have been able
-                        * to overflow if the packet pointer started in the
-                        * upper half of the address space.
-                        * So we did not get a 'range' on R6, and the access
-                        * attempt will fail.
-                        */
-                       {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+                       /* R5 bitwise operator &= on pointer prohibited */
                }
        },
        {