]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge tag 'riscv-for-linus-4.15-maintainers' of git://git.kernel.org/pub/scm/linux...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Jan 2018 23:10:50 +0000 (15:10 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Jan 2018 23:10:50 +0000 (15:10 -0800)
Pull RISC-V update from Palmer Dabbelt:
 "RISC-V: We have a new mailing list and git repo!

  Sorry to send something essentially as late as possible (Friday after
  an rc9), but we managed to get a mailing list for the RISC-V Linux
  port. We've been using patches@groups.riscv.org for a while, but that
  list has some problems (it's Google Groups and it's shared over all
  RISC-V software projects). The new infradead.org list is much better.
  We just got it on Wednesday but I used it a bit on Thursday to shake
  out all the configuration problems and it appears to be in working
  order.

  When I updated the mailing list I noticed that the MAINTAINERS file
  was pointing to our github repo, but now that we have a kernel.org
  repo I'd like to point to that instead so I changed that as well.
  We'll be centralizing all RISC-V Linux related development here as
  that seems to be the saner way to go about it.

  I can understand if it's too late to get this into 4.15, but given
  that it's not a code change I was hoping it'd still be OK. It would be
  nice to have the new mailing list and git repo in the release tarballs
  so when people start to find bugs they'll get to the right place"

* tag 'riscv-for-linus-4.15-maintainers' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux:
  Update the RISC-V MAINTAINERS file

83 files changed:
MAINTAINERS
arch/s390/kvm/kvm-s390.c
arch/sparc/crypto/Makefile
arch/x86/kernel/Makefile
arch/x86/kernel/ftrace_64.S
arch/x86/kernel/unwind_orc.c
arch/x86/pci/fixup.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/input/joystick/xpad.c
drivers/input/mouse/trackpoint.c
drivers/input/mouse/trackpoint.h
drivers/input/rmi4/rmi_f01.c
drivers/input/touchscreen/s6sy761.c
drivers/input/touchscreen/stmfts.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/emac.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/geneve.c
drivers/net/ppp/pppoe.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/vhost/vhost.c
fs/btrfs/delayed-inode.c
fs/nfsd/auth.c
fs/orangefs/devorangefs-req.c
fs/orangefs/file.c
fs/orangefs/orangefs-kernel.h
fs/orangefs/waitqueue.c
include/linux/ftrace.h
include/linux/swapops.h
include/linux/vermagic.h
include/net/dst.h
include/net/ipv6.h
include/net/net_namespace.h
include/net/pkt_cls.h
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_functions.c
mm/page_vma_mapped.c
net/core/dev.c
net/dccp/ccids/ccid2.c
net/ipv4/esp4_offload.c
net/ipv4/igmp.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/tcp.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_timer.c
net/ipv4/udp_offload.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipv6_sockglue.c
net/ipv6/sit.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp_offload.c
net/ipv6/xfrm6_mode_tunnel.c
net/kcm/kcmsock.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_send.c
net/sched/em_nbyte.c
net/sctp/offload.c
net/tls/tls_sw.c
net/vmw_vsock/af_vsock.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c

index a3c25b18bd2d988c5f067cb31f52e9639e123bce..cbd1ed6bc915c471a760d37bedacfb8aa7a75cae 100644 (file)
@@ -62,7 +62,15 @@ trivial patch so apply some common sense.
 
 7.     When sending security related changes or reports to a maintainer
        please Cc: security@kernel.org, especially if the maintainer
-       does not respond.
+       does not respond. Please keep in mind that the security team is
+       a small set of people who can be efficient only when working on
+       verified bugs. Please only Cc: this list when you have identified
+       that the bug would present a short-term risk to other users if it
+       were publicly disclosed. For example, reports of address leaks do
+       not represent an immediate threat and are better handled publicly,
+       and ideally, should come with a patch proposal. Please do not send
+       automated reports to this list either. Such bugs will be handled
+       better and faster in the usual public places.
 
 8.     Happy hacking.
 
@@ -12233,7 +12241,7 @@ M:      Security Officers <security@kernel.org>
 S:     Supported
 
 SECURITY SUBSYSTEM
-M:     James Morris <james.l.morris@oracle.com>
+M:     James Morris <jmorris@namei.org>
 M:     "Serge E. Hallyn" <serge@hallyn.com>
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
index 2598cf243b86e08f21ccb6f9ae6ea2e28465d680..1371dff2b90d14244a52da555d9d4914a1be2f45 100644 (file)
@@ -769,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
 
 /*
  * Must be called with kvm->srcu held to avoid races on memslots, and with
- * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
  */
 static int kvm_s390_vm_start_migration(struct kvm *kvm)
 {
@@ -825,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 }
 
 /*
- * Must be called with kvm->lock to avoid races with ourselves and
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
  * kvm_s390_vm_start_migration.
  */
 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -840,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
 
        if (kvm->arch.use_cmma) {
                kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+               /* We have to wait for the essa emulation to finish */
+               synchronize_srcu(&kvm->srcu);
                vfree(mgs->pgste_bitmap);
        }
        kfree(mgs);
@@ -849,14 +851,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
 static int kvm_s390_vm_set_migration(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
 {
-       int idx, res = -ENXIO;
+       int res = -ENXIO;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->slots_lock);
        switch (attr->attr) {
        case KVM_S390_VM_MIGRATION_START:
-               idx = srcu_read_lock(&kvm->srcu);
                res = kvm_s390_vm_start_migration(kvm);
-               srcu_read_unlock(&kvm->srcu, idx);
                break;
        case KVM_S390_VM_MIGRATION_STOP:
                res = kvm_s390_vm_stop_migration(kvm);
@@ -864,7 +864,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm,
        default:
                break;
        }
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->slots_lock);
 
        return res;
 }
@@ -1754,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&args, argp, sizeof(args)))
                        break;
+               mutex_lock(&kvm->slots_lock);
                r = kvm_s390_get_cmma_bits(kvm, &args);
+               mutex_unlock(&kvm->slots_lock);
                if (!r) {
                        r = copy_to_user(argp, &args, sizeof(args));
                        if (r)
@@ -1768,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&args, argp, sizeof(args)))
                        break;
+               mutex_lock(&kvm->slots_lock);
                r = kvm_s390_set_cmma_bits(kvm, &args);
+               mutex_unlock(&kvm->slots_lock);
                break;
        }
        default:
index 818d3aa5172e680f7e138a21b54aeac4f3b0e48f..d257186c27d12d34cdba156412ab00d79f80ff2b 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
 
 obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
 obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
-obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
+obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
 
index 81bb565f449740c1d94e057c541db3c84df8bc46..7e2baf7304ae491974fedbab7dfaa4311eaa3b9f 100644 (file)
@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o                         := n
 KASAN_SANITIZE_paravirt.o                              := n
 
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o    := y
-OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
 OBJECT_FILES_NON_STANDARD_test_nx.o                    := y
 OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o     := y
 
+ifdef CONFIG_FRAME_POINTER
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
+endif
+
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
 # boot, dumpstack/stacktrace, etc are either non-interesting or can lead to
index 7cb8ba08beb997ef66724e2db5e66021c5ce32ee..ef61f540cf0af6d14d3d97cf1b9124cd52585a14 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/ftrace.h>
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/unwind_hints.h>
 
        .code64
        .section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
 EXPORT_SYMBOL(mcount)
 #endif
 
-/* All cases save the original rbp (8 bytes) */
 #ifdef CONFIG_FRAME_POINTER
 # ifdef CC_USING_FENTRY
 /* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
 # endif
 #else
 /* No need to save a stack frame */
-# define MCOUNT_FRAME_SIZE     8
+# define MCOUNT_FRAME_SIZE     0
 #endif /* CONFIG_FRAME_POINTER */
 
 /* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
  */
 .macro save_mcount_regs added=0
 
-       /* Always save the original rbp */
+#ifdef CONFIG_FRAME_POINTER
+       /* Save the original rbp */
        pushq %rbp
 
-#ifdef CONFIG_FRAME_POINTER
        /*
         * Stack traces will stop at the ftrace trampoline if the frame pointer
         * is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
         * Save the original RBP. Even though the mcount ABI does not
         * require this, it helps out callers.
         */
+#ifdef CONFIG_FRAME_POINTER
        movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+#else
+       movq %rbp, %rdx
+#endif
        movq %rdx, RBP(%rsp)
 
        /* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
 
 ENTRY(function_hook)
        retq
-END(function_hook)
+ENDPROC(function_hook)
 
 ENTRY(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
 /* This is weak to keep gas from relaxing the jumps */
 WEAK(ftrace_stub)
        retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
        /* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
 
        jmp ftrace_epilogue
 
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
        restore_mcount_regs
 
        retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
-GLOBAL(return_to_handler)
+ENTRY(return_to_handler)
+       UNWIND_HINT_EMPTY
        subq  $24, %rsp
 
        /* Save the return values */
@@ -330,4 +335,5 @@ GLOBAL(return_to_handler)
        movq (%rsp), %rax
        addq $24, %rsp
        JMP_NOSPEC %rdi
+END(return_to_handler)
 #endif
index be86a865087a6b9dc8e04031dbf2e2fbeeda1ed5..1f9188f5357cb38e45295c07ba5f2e902563257e 100644 (file)
@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
 }
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+static struct orc_entry *orc_find(unsigned long ip);
+
+/*
+ * Ftrace dynamic trampolines do not have orc entries of their own.
+ * But they are copies of the ftrace entries that are static and
+ * defined in ftrace_*.S, which do have orc entries.
+ *
+ * If the unwinder comes across an ftrace trampoline, then find the
+ * ftrace function that was used to create it, and use that ftrace
+ * function's orc entry, as the placement of the return code in
+ * the stack will be identical.
+ */
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+       struct ftrace_ops *ops;
+       unsigned long caller;
+
+       ops = ftrace_ops_trampoline(ip);
+       if (!ops)
+               return NULL;
+
+       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+               caller = (unsigned long)ftrace_regs_call;
+       else
+               caller = (unsigned long)ftrace_call;
+
+       /* Prevent unlikely recursion */
+       if (ip == caller)
+               return NULL;
+
+       return orc_find(caller);
+}
+#else
+static struct orc_entry *orc_ftrace_find(unsigned long ip)
+{
+       return NULL;
+}
+#endif
+
 static struct orc_entry *orc_find(unsigned long ip)
 {
+       static struct orc_entry *orc;
+
        if (!orc_init)
                return NULL;
 
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
                                  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
 
        /* Module lookup: */
-       return orc_module_find(ip);
+       orc = orc_module_find(ip);
+       if (orc)
+               return orc;
+
+       return orc_ftrace_find(ip);
 }
 
 static void orc_sort_swap(void *_a, void *_b, int size)
index f6a26e3cb4763325465df6ec19efb282e26e41fd..54ef19e90705aa0f1aa9ef5a94beec403d2f54ce 100644 (file)
@@ -662,11 +662,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
  */
 static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 {
+       static const char *name = "PCI Bus 0000:00";
+       struct resource *res, *conflict;
        u32 base, limit, high;
        struct pci_dev *other;
-       struct resource *res;
        unsigned i;
-       int r;
 
        if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
                return;
@@ -707,21 +707,26 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
         * Allocate a 256GB window directly below the 0xfd00000000 hardware
         * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
         */
-       res->name = "PCI Bus 0000:00";
+       res->name = name;
        res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
                IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
        res->start = 0xbd00000000ull;
        res->end = 0xfd00000000ull - 1;
 
-       r = request_resource(&iomem_resource, res);
-       if (r) {
+       conflict = request_resource_conflict(&iomem_resource, res);
+       if (conflict) {
                kfree(res);
-               return;
-       }
+               if (conflict->name != name)
+                       return;
 
-       dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
-                res);
-       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+               /* We are resuming from suspend; just reenable the window */
+               res = conflict;
+       } else {
+               dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
+                        res);
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+               pci_bus_add_resource(dev->bus, res, 0);
+       }
 
        base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
                AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
@@ -733,13 +738,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
        pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
        pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
        pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
-
-       pci_bus_add_resource(dev->bus, res, 0);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
 
 #endif
index deb96de54b0030244ec88014bce526119c3fae91..ee2431a7804ec81f4e74cfceaa0969d16e7a336e 100644 (file)
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
        struct nvkm_pci *pci = arg;
        struct nvkm_device *device = pci->subdev.device;
        bool handled = false;
+
+       if (pci->irq < 0)
+               return IRQ_HANDLED;
+
        nvkm_mc_intr_unarm(device);
        if (pci->msi)
                pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
 
-       if (pci->irq >= 0) {
-               free_irq(pci->irq, pci);
-               pci->irq = -1;
-       }
-
        if (pci->agp.bridge)
                nvkm_agp_fini(pci);
 
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
-       if (pci_is_pcie(pci->pdev))
-               return nvkm_pcie_oneinit(pci);
+       struct pci_dev *pdev = pci->pdev;
+       int ret;
+
+       if (pci_is_pcie(pci->pdev)) {
+               ret = nvkm_pcie_oneinit(pci);
+               if (ret)
+                       return ret;
+       }
+
+       ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+       if (ret)
+               return ret;
+
+       pci->irq = pdev->irq;
        return 0;
 }
 
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
-       struct pci_dev *pdev = pci->pdev;
        int ret;
 
        if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
        if (pci->func->init)
                pci->func->init(pci);
 
-       ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-       if (ret)
-               return ret;
-
-       pci->irq = pdev->irq;
-
        /* Ensure MSI interrupts are armed, for the case where there are
         * already interrupts pending (for whatever reason) at load time.
         */
        if (pci->msi)
                pci->func->msi_rearm(pci);
 
-       return ret;
+       return 0;
 }
 
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_pci *pci = nvkm_pci(subdev);
+
        nvkm_agp_dtor(pci);
+
+       if (pci->irq >= 0) {
+               /* free_irq() will call the handler, we use pci->irq == -1
+                * to signal that it's been torn down and should be a noop.
+                */
+               int irq = pci->irq;
+               pci->irq = -1;
+               free_irq(irq, pci);
+       }
+
        if (pci->msi)
                pci_disable_msi(pci->pdev);
+
        return nvkm_pci(subdev);
 }
 
index 638540943c61a5e095c87be8d2b2bf543ea933b1..c94cce96544c8b0196f16b2e9151e15430a28690 100644 (file)
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
        struct vc4_exec_info *exec[2];
        struct vc4_bo *bo;
        unsigned long irqflags;
-       unsigned int i, j, unref_list_count, prev_idx;
+       unsigned int i, j, k, unref_list_count;
 
        kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
        if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
                return;
        }
 
-       prev_idx = 0;
+       k = 0;
        for (i = 0; i < 2; i++) {
                if (!exec[i])
                        continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
                        WARN_ON(!refcount_read(&bo->usecnt));
                        refcount_inc(&bo->usecnt);
                        drm_gem_object_get(&exec[i]->bo[j]->base);
-                       kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+                       kernel_state->bo[k++] = &exec[i]->bo[j]->base;
                }
 
                list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
                         * because they are naturally unpurgeable.
                         */
                        drm_gem_object_get(&bo->base.base);
-                       kernel_state->bo[j + prev_idx] = &bo->base.base;
-                       j++;
+                       kernel_state->bo[k++] = &bo->base.base;
                }
-               prev_idx = j + 1;
        }
 
+       WARN_ON_ONCE(k != state->bo_count);
+
        if (exec[0])
                state->start_bin = exec[0]->ct0ca;
        if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
 
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+       V3D_WRITE(V3D_L2CACTL,
+                 V3D_L2CACTL_L2CCLR);
+
+       V3D_WRITE(V3D_SLCACTL,
+                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+                 VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
 /* Sets the registers for the next job to be actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
        if (!exec)
                return;
 
+       /* A previous RCL may have written to one of our textures, and
+        * our full cache flush at bin time may have occurred before
+        * that RCL completed.  Flush the texture cache now, but not
+        * the instructions or uniforms (since we don't write those
+        * from an RCL).
+        */
+       vc4_flush_texture_caches(dev);
+
        submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
 
index 2c13123bfd69499e3ac7661871d176c57979664b..71ea9e26666cd2e9ac30e7f099289f04f8d501ba 100644 (file)
@@ -1456,8 +1456,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int e = skb_queue_empty(&priv->cm.skb_queue);
 
-       if (skb_dst(skb))
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       skb_dst_update_pmtu(skb, mtu);
 
        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
index d86e59515b9c9eadeab8346e8f97780e3edf1690..d88d3e0f59fb832cd569330a2899cf9647a08945 100644 (file)
@@ -229,6 +229,7 @@ static const struct xpad_device {
        { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = {
        0x00, 0x00, 0x00, 0x80, 0x00
 };
 
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init1[] = {
+       0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init2[] = {
+       0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
+
 /*
  * A specific rumble packet is required for some PowerA pads to start
  * sending input reports. One of those pads is (0x24c6:0x543a).
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
index 0871010f18d5f449e6440a5e41f71d24b8aecfd2..bbd29220dbe99838ba69a48dc3dfb595ec0dae0f 100644 (file)
 #include "psmouse.h"
 #include "trackpoint.h"
 
+static const char * const trackpoint_variants[] = {
+       [TP_VARIANT_IBM]        = "IBM",
+       [TP_VARIANT_ALPS]       = "ALPS",
+       [TP_VARIANT_ELAN]       = "Elan",
+       [TP_VARIANT_NXP]        = "NXP",
+};
+
 /*
  * Power-on Reset: Resets all trackpoint parameters, including RAM values,
  * to defaults.
@@ -26,7 +33,7 @@
  */
 static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
 {
-       unsigned char results[2];
+       u8 results[2];
        int tries = 0;
 
        /* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
 
        /* Check for success response -- 0xAA00 */
        if (results[0] != 0xAA || results[1] != 0x00)
-               return -1;
+               return -ENODEV;
 
        return 0;
 }
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
 /*
  * Device IO: read, write and toggle bit
  */
-static int trackpoint_read(struct ps2dev *ps2dev,
-                          unsigned char loc, unsigned char *results)
+static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
 {
        if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
            ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
        return 0;
 }
 
-static int trackpoint_write(struct ps2dev *ps2dev,
-                           unsigned char loc, unsigned char val)
+static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
 {
        if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
            ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
        return 0;
 }
 
-static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
-                                unsigned char loc, unsigned char mask)
+static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
 {
        /* Bad things will happen if the loc param isn't in this range */
        if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
        return 0;
 }
 
-static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
-                                unsigned char mask, unsigned char value)
+static int trackpoint_update_bit(struct ps2dev *ps2dev,
+                                u8 loc, u8 mask, u8 value)
 {
        int retval = 0;
-       unsigned char data;
+       u8 data;
 
        trackpoint_read(ps2dev, loc, &data);
        if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
  */
 struct trackpoint_attr_data {
        size_t field_offset;
-       unsigned char command;
-       unsigned char mask;
-       unsigned char inverted;
-       unsigned char power_on_default;
+       u8 command;
+       u8 mask;
+       bool inverted;
+       u8 power_on_default;
 };
 
-static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
+static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
+                                       void *data, char *buf)
 {
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
-       unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
+       u8 value = *(u8 *)((void *)tp + attr->field_offset);
 
        if (attr->inverted)
                value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
 {
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
-       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
-       unsigned char value;
+       u8 *field = (void *)tp + attr->field_offset;
+       u8 value;
        int err;
 
        err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
 {
        struct trackpoint_data *tp = psmouse->private;
        struct trackpoint_attr_data *attr = data;
-       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
-       unsigned int value;
+       bool *field = (void *)tp + attr->field_offset;
+       bool value;
        int err;
 
-       err = kstrtouint(buf, 10, &value);
+       err = kstrtobool(buf, &value);
        if (err)
                return err;
 
-       if (value > 1)
-               return -EINVAL;
-
        if (attr->inverted)
                value = !value;
 
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO,                              \
                    &trackpoint_attr_##_name,                           \
                    trackpoint_show_int_attr, trackpoint_set_bit_attr)
 
-#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name)                    \
-do {                                                                   \
-       struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name;  \
-                                                                       \
-       trackpoint_update_bit(&_psmouse->ps2dev,                        \
-                       _attr->command, _attr->mask, _tp->_name);       \
-} while (0)
-
-#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name)             \
-do {                                                                   \
-       if (!_power_on ||                                               \
-           _tp->_name != trackpoint_attr_##_name.power_on_default) {   \
-               if (!trackpoint_attr_##_name.mask)                      \
-                       trackpoint_write(&_psmouse->ps2dev,             \
-                                trackpoint_attr_##_name.command,       \
-                                _tp->_name);                           \
-               else                                                    \
-                       TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name);    \
-       }                                                               \
-} while (0)
-
-#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name)                            \
-       (_tp->_name = trackpoint_attr_##_name.power_on_default)
-
 TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
 TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
 TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
 TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
 
-TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
                    TP_DEF_PTSON);
-TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0,
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
                    TP_DEF_SKIPBACK);
-TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1,
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
                    TP_DEF_EXT_DEV);
 
+static bool trackpoint_is_attr_available(struct psmouse *psmouse,
+                                        struct attribute *attr)
+{
+       struct trackpoint_data *tp = psmouse->private;
+
+       return tp->variant_id == TP_VARIANT_IBM ||
+               attr == &psmouse_attr_sensitivity.dattr.attr ||
+               attr == &psmouse_attr_press_to_select.dattr.attr;
+}
+
+static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
+                                         struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct serio *serio = to_serio_port(dev);
+       struct psmouse *psmouse = serio_get_drvdata(serio);
+
+       return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
+}
+
 static struct attribute *trackpoint_attrs[] = {
        &psmouse_attr_sensitivity.dattr.attr,
        &psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
 };
 
 static struct attribute_group trackpoint_attr_group = {
-       .attrs = trackpoint_attrs,
+       .is_visible     = trackpoint_is_attr_visible,
+       .attrs          = trackpoint_attrs,
 };
 
-static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
-{
-       unsigned char param[2] = { 0 };
+#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name)             \
+do {                                                                   \
+       struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name;  \
+                                                                       \
+       if ((!_power_on || _tp->_name != _attr->power_on_default) &&    \
+           trackpoint_is_attr_available(_psmouse,                      \
+                               &psmouse_attr_##_name.dattr.attr)) {    \
+               if (!_attr->mask)                                       \
+                       trackpoint_write(&_psmouse->ps2dev,             \
+                                        _attr->command, _tp->_name);   \
+               else                                                    \
+                       trackpoint_update_bit(&_psmouse->ps2dev,        \
+                                       _attr->command, _attr->mask,    \
+                                       _tp->_name);                    \
+       }                                                               \
+} while (0)
 
-       if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
-               return -1;
+#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name)                    \
+do {                                                                   \
+       _tp->_name = trackpoint_attr_##_name.power_on_default;          \
+} while (0)
 
-       /* add new TP ID. */
-       if (!(param[0] & TP_MAGIC_IDENT))
-               return -1;
+static int trackpoint_start_protocol(struct psmouse *psmouse,
+                                    u8 *variant_id, u8 *firmware_id)
+{
+       u8 param[2] = { 0 };
+       int error;
 
-       if (firmware_id)
-               *firmware_id = param[1];
+       error = ps2_command(&psmouse->ps2dev,
+                           param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
+       if (error)
+               return error;
+
+       switch (param[0]) {
+       case TP_VARIANT_IBM:
+       case TP_VARIANT_ALPS:
+       case TP_VARIANT_ELAN:
+       case TP_VARIANT_NXP:
+               if (variant_id)
+                       *variant_id = param[0];
+               if (firmware_id)
+                       *firmware_id = param[1];
+               return 0;
+       }
 
-       return 0;
+       return -ENODEV;
 }
 
 /*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
 {
        struct trackpoint_data *tp = psmouse->private;
 
-       if (!in_power_on_state) {
+       if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
                /*
                 * Disable features that may make device unusable
                 * with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
 
 static void trackpoint_disconnect(struct psmouse *psmouse)
 {
-       sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
+       device_remove_group(&psmouse->ps2dev.serio->dev,
+                           &trackpoint_attr_group);
 
        kfree(psmouse->private);
        psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
 
 static int trackpoint_reconnect(struct psmouse *psmouse)
 {
-       int reset_fail;
+       struct trackpoint_data *tp = psmouse->private;
+       int error;
+       bool was_reset;
 
-       if (trackpoint_start_protocol(psmouse, NULL))
-               return -1;
+       error = trackpoint_start_protocol(psmouse, NULL, NULL);
+       if (error)
+               return error;
 
-       reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev);
-       if (trackpoint_sync(psmouse, !reset_fail))
-               return -1;
+       was_reset = tp->variant_id == TP_VARIANT_IBM &&
+                   trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
+
+       error = trackpoint_sync(psmouse, was_reset);
+       if (error)
+               return error;
 
        return 0;
 }
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 {
        struct ps2dev *ps2dev = &psmouse->ps2dev;
-       unsigned char firmware_id;
-       unsigned char button_info;
+       struct trackpoint_data *tp;
+       u8 variant_id;
+       u8 firmware_id;
+       u8 button_info;
        int error;
 
-       if (trackpoint_start_protocol(psmouse, &firmware_id))
-               return -1;
+       error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
+       if (error)
+               return error;
 
        if (!set_properties)
                return 0;
 
-       if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
-               psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
-               button_info = 0x33;
-       }
-
-       psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
-       if (!psmouse->private)
+       tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+       if (!tp)
                return -ENOMEM;
 
-       psmouse->vendor = "IBM";
+       trackpoint_defaults(tp);
+       tp->variant_id = variant_id;
+       tp->firmware_id = firmware_id;
+
+       psmouse->private = tp;
+
+       psmouse->vendor = trackpoint_variants[variant_id];
        psmouse->name = "TrackPoint";
 
        psmouse->reconnect = trackpoint_reconnect;
        psmouse->disconnect = trackpoint_disconnect;
 
+       if (variant_id != TP_VARIANT_IBM) {
+               /* Newer variants do not support extended button query. */
+               button_info = 0x33;
+       } else {
+               error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
+               if (error) {
+                       psmouse_warn(psmouse,
+                                    "failed to get extended button data, assuming 3 buttons\n");
+                       button_info = 0x33;
+               } else if (!button_info) {
+                       psmouse_warn(psmouse,
+                                    "got 0 in extended button data, assuming 3 buttons\n");
+                       button_info = 0x33;
+               }
+       }
+
        if ((button_info & 0x0f) >= 3)
-               __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+               input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
 
        __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
        __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
 
-       trackpoint_defaults(psmouse->private);
-
-       error = trackpoint_power_on_reset(ps2dev);
-
-       /* Write defaults to TP only if reset fails. */
-       if (error)
+       if (variant_id != TP_VARIANT_IBM ||
+           trackpoint_power_on_reset(ps2dev) != 0) {
+               /*
+                * Write defaults to TP if we did not reset the trackpoint.
+                */
                trackpoint_sync(psmouse, false);
+       }
 
-       error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
+       error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
        if (error) {
                psmouse_err(psmouse,
                            "failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
        }
 
        psmouse_info(psmouse,
-                    "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
-                    firmware_id,
+                    "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+                    psmouse->vendor, firmware_id,
                     (button_info & 0xf0) >> 4, button_info & 0x0f);
 
        return 0;
index 88055755f82e2304a5f7438bc59cb7f47f488823..10a0391482343d4ba482336367b204cf64f6e47c 100644 (file)
 #define TP_COMMAND             0xE2    /* Commands start with this */
 
 #define TP_READ_ID             0xE1    /* Sent for device identification */
-#define TP_MAGIC_IDENT         0x03    /* Sent after a TP_READ_ID followed */
-                                       /* by the firmware ID */
-                                       /* Firmware ID includes 0x1, 0x2, 0x3 */
 
+/*
+ * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
+ * 0x01 was the original IBM trackpoint, others implement very limited
+ * subset of trackpoint features.
+ */
+#define TP_VARIANT_IBM         0x01
+#define TP_VARIANT_ALPS                0x02
+#define TP_VARIANT_ELAN                0x03
+#define TP_VARIANT_NXP         0x04
 
 /*
  * Commands
 
 #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
 
-struct trackpoint_data
-{
-       unsigned char sensitivity, speed, inertia, reach;
-       unsigned char draghys, mindrag;
-       unsigned char thresh, upthresh;
-       unsigned char ztime, jenks;
-       unsigned char drift_time;
+struct trackpoint_data {
+       u8 variant_id;
+       u8 firmware_id;
+
+       u8 sensitivity, speed, inertia, reach;
+       u8 draghys, mindrag;
+       u8 thresh, upthresh;
+       u8 ztime, jenks;
+       u8 drift_time;
 
        /* toggles */
-       unsigned char press_to_select;
-       unsigned char skipback;
-       unsigned char ext_dev;
+       bool press_to_select;
+       bool skipback;
+       bool ext_dev;
 };
 
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
index ae966e333a2f47242eca6443eabf4d38c575813a..8a07ae147df690ee7796c3f9f897904fce6ac6dd 100644 (file)
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
 
        dev_set_drvdata(&fn->dev, f01);
 
-       error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
+       error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
        if (error)
-               dev_warn(&fn->dev,
-                        "Failed to create attribute group: %d\n", error);
+               dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
 
        return 0;
 }
 
+static void rmi_f01_remove(struct rmi_function *fn)
+{
+       /* Note that the bus device is used, not the F01 device */
+       sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+}
+
 static int rmi_f01_config(struct rmi_function *fn)
 {
        struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
        },
        .func           = 0x01,
        .probe          = rmi_f01_probe,
+       .remove         = rmi_f01_remove,
        .config         = rmi_f01_config,
        .attention      = rmi_f01_attention,
        .suspend        = rmi_f01_suspend,
index 26b1cb8a88ece321777f6950a762e57e74c0c840..675efa93d4448b556b27bc2e1e0b682273b1d0c3 100644 (file)
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Samsung S6SY761 Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// Samsung S6SY761 Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
 
 #include <asm/unaligned.h>
 #include <linux/delay.h>
index c12d0189993965121eedf9c625dffcccbc26428a..2a123e20a42e39ce4262b8a7a06e3c57aa9b3080 100644 (file)
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * STMicroelectronics FTS Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// STMicroelectronics FTS Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
 
 #include <linux/delay.h>
 #include <linux/i2c.h>
index d4a548a6a55cd9d4793a282e231414143a139324..a452d5a1b0f3fb4c602793679b1e80295ea13e29 100644 (file)
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                        ethtype_mask = 0;
                }
 
+               if (ethtype_key == ETH_P_IPV6)
+                       fs->type = 1;
+
                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                                           VLAN_PRIO_SHIFT);
                vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
-               fs->val.ivlan = cpu_to_be16(vlan_tci);
-               fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+               fs->val.ivlan = vlan_tci;
+               fs->mask.ivlan = vlan_tci_mask;
 
                /* Chelsio adapters use ivlan_vld bit to match vlan packets
                 * as 802.1Q. Also, when vlan tag is present in packets,
index c6e859a27ee634bd5d0e00ce9d64c85531427284..e180657a02ef7407d90d58c8629d2ff59199a862 100644 (file)
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
 
        be_schedule_worker(adapter);
 
+       /*
+        * The IF was destroyed and re-created. We need to clear
+        * all promiscuous flags valid for the destroyed IF.
+        * Without this promisc mode is not restored during
+        * be_open() because the driver thinks that it is
+        * already enabled in HW.
+        */
+       adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
        if (netif_running(netdev))
                status = be_open(netdev);
 
index 7feff2450ed68717240c1793279c45ee5bef525f..241db3199b88cdf551b0fb2ab0be501d8d852c82 100644 (file)
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_RFS_8K;
+               break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
        case 16384:
                ret |= EMAC4_MR1_TFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_TFS_8K;
+               break;
        case 4096:
                ret |= EMAC4_MR1_TFS_4K;
                break;
index 5afcc27ceebb6872bfb9cd2cda8abda2fcdae934..c26d2631ca30e55ccfe3a0f37b8a4ed76e8bc73d 100644 (file)
@@ -151,9 +151,11 @@ struct emac_regs {
 
 #define EMAC4_MR1_RFS_2K               0x00100000
 #define EMAC4_MR1_RFS_4K               0x00180000
+#define EMAC4_MR1_RFS_8K               0x00200000
 #define EMAC4_MR1_RFS_16K              0x00280000
 #define EMAC4_MR1_TFS_2K                       0x00020000
 #define EMAC4_MR1_TFS_4K               0x00030000
+#define EMAC4_MR1_TFS_8K               0x00040000
 #define EMAC4_MR1_TFS_16K              0x00050000
 #define EMAC4_MR1_TR                   0x00008000
 #define EMAC4_MR1_MWSW_001             0x00001000
@@ -242,7 +244,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE                        0x00004000
 #define EMAC_STACR_STAC_MASK           0x00003000
 #define EMAC_STACR_STAC_READ           0x00001000
-#define EMAC_STACR_STAC_WRITE          0x00002000
+#define EMAC_STACR_STAC_WRITE          0x00000800
 #define EMAC_STACR_OPBC_MASK           0x00000C00
 #define EMAC_STACR_OPBC_50             0x00000000
 #define EMAC_STACR_OPBC_66             0x00000400
index ab2e1917cd04290eef52bf188f2f2def6ae389cc..b65f5f3ac034e980c46af086c11385f5aa9d0832 100644 (file)
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
        struct ibmvnic_rx_pool *rx_pool;
        int rx_scrqs;
        int i, j, rc;
+       u64 *size_array;
+
+       size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+               be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 
        rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 
                netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 
-               rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+               if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+                       free_long_term_buff(adapter, &rx_pool->long_term_buff);
+                       rx_pool->buff_size = be64_to_cpu(size_array[i]);
+                       alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+                                            rx_pool->size *
+                                            rx_pool->buff_size);
+               } else {
+                       rc = reset_long_term_buff(adapter,
+                                                 &rx_pool->long_term_buff);
+               }
+
                if (rc)
                        return rc;
 
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_rx_pool *rx_pool;
-       int rx_scrqs;
        int i, j;
 
        if (!adapter->rx_pool)
                return;
 
-       rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-       for (i = 0; i < rx_scrqs; i++) {
+       for (i = 0; i < adapter->num_active_rx_pools; i++) {
                rx_pool = &adapter->rx_pool[i];
 
                netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
+       adapter->num_active_rx_pools = 0;
 }
 
 static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
                return -1;
        }
 
+       adapter->num_active_rx_pools = 0;
+
        for (i = 0; i < rxadd_subcrqs; i++) {
                rx_pool = &adapter->rx_pool[i];
 
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
                rx_pool->next_free = 0;
        }
 
+       adapter->num_active_rx_pools = rxadd_subcrqs;
+
        return 0;
 }
 
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_tx_pool *tx_pool;
-       int i, tx_scrqs;
+       int i;
 
        if (!adapter->tx_pool)
                return;
 
-       tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-       for (i = 0; i < tx_scrqs; i++) {
+       for (i = 0; i < adapter->num_active_tx_pools; i++) {
                netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
                tx_pool = &adapter->tx_pool[i];
                kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 
        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
+       adapter->num_active_tx_pools = 0;
 }
 
 static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
        if (!adapter->tx_pool)
                return -1;
 
+       adapter->num_active_tx_pools = 0;
+
        for (i = 0; i < tx_subcrqs; i++) {
                tx_pool = &adapter->tx_pool[i];
 
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
                tx_pool->producer_index = 0;
        }
 
+       adapter->num_active_tx_pools = tx_subcrqs;
+
        return 0;
 }
 
@@ -860,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
        if (adapter->vpd->buff)
                len = adapter->vpd->len;
 
-       reinit_completion(&adapter->fw_done);
+       init_completion(&adapter->fw_done);
        crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd_size.cmd = GET_VPD_SIZE;
        ibmvnic_send_crq(adapter, &crq);
@@ -922,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
        if (!adapter->vpd)
                return -ENOMEM;
 
+       /* Vital Product Data (VPD) */
+       rc = ibmvnic_get_vpd(adapter);
+       if (rc) {
+               netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+               return rc;
+       }
+
        adapter->map_id = 1;
        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
@@ -995,7 +1023,7 @@ static int __ibmvnic_open(struct net_device *netdev)
 static int ibmvnic_open(struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       int rc, vpd;
+       int rc;
 
        mutex_lock(&adapter->reset_lock);
 
@@ -1018,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
        rc = __ibmvnic_open(netdev);
        netif_carrier_on(netdev);
 
-       /* Vital Product Data (VPD) */
-       vpd = ibmvnic_get_vpd(adapter);
-       if (vpd)
-               netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
        mutex_unlock(&adapter->reset_lock);
 
        return rc;
@@ -1548,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 static int do_reset(struct ibmvnic_adapter *adapter,
                    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+       u64 old_num_rx_queues, old_num_tx_queues;
        struct net_device *netdev = adapter->netdev;
        int i, rc;
 
@@ -1557,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;
 
+       old_num_rx_queues = adapter->req_rx_queues;
+       old_num_tx_queues = adapter->req_tx_queues;
+
        if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
                rc = ibmvnic_reenable_crq_queue(adapter);
                if (rc)
@@ -1601,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        rc = init_resources(adapter);
                        if (rc)
                                return rc;
+               } else if (adapter->req_rx_queues != old_num_rx_queues ||
+                          adapter->req_tx_queues != old_num_tx_queues) {
+                       release_rx_pools(adapter);
+                       release_tx_pools(adapter);
+                       init_rx_pools(netdev);
+                       init_tx_pools(netdev);
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
@@ -3592,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be64_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               *req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+               if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+                   REQ_MTU) {
+                       pr_err("mtu of %llu is not supported. Reverting.\n",
+                              *req_value);
+                       *req_value = adapter->fallback.mtu;
+               } else {
+                       *req_value =
+                               be64_to_cpu(crq->request_capability_rsp.number);
+               }
+
                ibmvnic_send_req_caps(adapter, 1);
                return;
        default:
index 4487f1e2c2668daf41030c6f0d39c8a327ab904c..3aec42118db2500c7d126ddef5c895f0a036276a 100644 (file)
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
        u64 opt_rxba_entries_per_subcrq;
        __be64 tx_rx_desc_req;
        u8 map_id;
+       u64 num_active_rx_pools;
+       u64 num_active_tx_pools;
 
        struct tasklet_struct tasklet;
        enum vnic_state state;
index 42dcaefc4c1942777edf6044953ccd1f798cac2a..af792112a2d3da749d3b5e586b9fcf5adc9ec50d 100644 (file)
@@ -7505,6 +7505,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
 {
        struct i40e_vsi *vsi = np->vsi;
 
+       if (!tc_can_offload(vsi->netdev))
+               return -EOPNOTSUPP;
        if (cls_flower->common.chain_index)
                return -EOPNOTSUPP;
 
index e401d9d245f33e4187c2468f4533df052cdea5ec..b69a705fd787d9a78c19f11e866a866d341b926e 100644 (file)
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
                return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
                                                   MLX5E_AM_STATS_WORSE;
 
+       if (!prev->ppms)
+               return curr->ppms ? MLX5E_AM_STATS_BETTER :
+                                   MLX5E_AM_STATS_SAME;
+
        if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
                return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
                                                   MLX5E_AM_STATS_WORSE;
+       if (!prev->epms)
+               return MLX5E_AM_STATS_SAME;
 
        if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
                return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
index 6c0391c13fe002f5372381c42a41b6c2f8087924..7042c855a5d6b2a9035194e346aec7623d5d2646 100644 (file)
@@ -1942,11 +1942,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
        dipn = htonl(dip);
        dev = mlxsw_sp->router->rifs[rif]->dev;
        n = neigh_lookup(&arp_tbl, &dipn, dev);
-       if (!n) {
-               netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
-                          &dip);
+       if (!n)
                return;
-       }
 
        netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
        neigh_event_send(n, NULL);
@@ -1973,11 +1970,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
 
        dev = mlxsw_sp->router->rifs[rif]->dev;
        n = neigh_lookup(&nd_tbl, &dip, dev);
-       if (!n) {
-               netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
-                          &dip);
+       if (!n)
                return;
-       }
 
        netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
        neigh_event_send(n, NULL);
index c8c4b3940564db8ef9b024bf72333c47940d4d77..b7abb8205d3a9984d15af0cb90ebb630d78e60e4 100644 (file)
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
        kfree(p_rdma_info);
 }
 
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
+{
+       qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
+}
+
 static void qed_rdma_free(struct qed_hwfn *p_hwfn)
 {
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
 
+       qed_rdma_free_reserved_lkey(p_hwfn);
        qed_rdma_resc_free(p_hwfn);
 }
 
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
 {
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
 
-       /* The first DPI is reserved for the Kernel */
-       __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
        /* Tid 0 will be used as the key for "reserved MR".
         * The driver should allocate memory for it so it can be loaded but no
         * ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
        return p_hwfn->p_rdma_info->dev;
 }
 
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
-       struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
-       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
-       spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-       qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
-       spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
 static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 {
        struct qed_hwfn *p_hwfn;
index fc0d5fa65ad4c1ca02073e2d1d1554df68443e86..734286ebe5ef557c0d2738702e86081552c68cc5 100644 (file)
@@ -2244,19 +2244,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
        void __iomem *ioaddr = tp->mmio_addr;
        dma_addr_t paddr = tp->counters_phys_addr;
        u32 cmd;
-       bool ret;
 
        RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+       RTL_R32(CounterAddrHigh);
        cmd = (u64)paddr & DMA_BIT_MASK(32);
        RTL_W32(CounterAddrLow, cmd);
        RTL_W32(CounterAddrLow, cmd | counter_cmd);
 
-       ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
-       RTL_W32(CounterAddrLow, 0);
-       RTL_W32(CounterAddrHigh, 0);
-
-       return ret;
+       return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
 }
 
 static bool rtl8169_reset_counters(struct net_device *dev)
index 0a48b3073d3d3614483e9d8ae64f073a5b3d2ead..64fda2e1040eb6d19e4a1f24fd1707e3240a7dbb 100644 (file)
@@ -829,7 +829,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
                          GENEVE_BASE_HLEN - info->options_len - 14;
 
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               skb_dst_update_pmtu(skb, mtu);
        }
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -875,7 +875,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
                          GENEVE_BASE_HLEN - info->options_len - 14;
 
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               skb_dst_update_pmtu(skb, mtu);
        }
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
index 4e1da1645b154ddb60cc1aaed992d42d1f9adf4d..5aa59f41bf8c3992b5ba58e24ac8763ab264eb5f 100644 (file)
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
        struct pppoe_hdr *ph;
        struct net_device *dev;
        char *start;
+       int hlen;
 
        lock_sock(sk);
        if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
        if (total_len > (dev->mtu + dev->hard_header_len))
                goto end;
 
-
-       skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
-                          0, GFP_KERNEL);
+       hlen = LL_RESERVED_SPACE(dev);
+       skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+                          dev->needed_tailroom, 0, GFP_KERNEL);
        if (!skb) {
                error = -ENOMEM;
                goto end;
        }
 
        /* Reserve space for headers. */
-       skb_reserve(skb, dev->hard_header_len);
+       skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
 
        skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
        /* Copy the data if there is no space for the header or if it's
         * read-only.
         */
-       if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+       if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
                goto abort;
 
        __skb_push(skb, sizeof(*ph));
index d56fe32bf48dea8c617c011d5bd6ddc8d9d5270f..8a22ff67b0268a588428c61c6a6211e3c6c2a02a 100644 (file)
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
        set_bit (work, &dev->flags);
-       if (!schedule_work (&dev->kevent)) {
-               if (net_ratelimit())
-                       netdev_err(dev->net, "kevent %d may have been dropped\n", work);
-       } else {
+       if (!schedule_work (&dev->kevent))
+               netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+       else
                netdev_dbg(dev->net, "kevent %d scheduled\n", work);
-       }
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
index d1c7029ded7cefef468eb035f971b969da612bf5..cf95290b160c565ed650846cd91076ec2a957ff9 100644 (file)
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
                                          rq->rx_ring[i].basePA);
                        rq->rx_ring[i].base = NULL;
                }
-               rq->buf_info[i] = NULL;
        }
 
        if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
                        (rq->rx_ring[0].size + rq->rx_ring[1].size);
                dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
                                  rq->buf_info_pa);
+               rq->buf_info[0] = rq->buf_info[1] = NULL;
        }
 }
 
index feb1b2e15c2e10676ff43cec39291e3e9791a51d..139c61c8244ad9099ed8d6377b74eff446354e10 100644 (file)
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
                                  struct sock *sk,
                                  struct sk_buff *skb)
 {
-       /* don't divert multicast */
-       if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+       /* don't divert multicast or local broadcast */
+       if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+           ipv4_is_lbcast(ip_hdr(skb)->daddr))
                return skb;
 
        if (qdisc_tx_is_default(vrf_dev))
index 31f4b7911ef84c85789011332e37c5314099d82c..c3e34e3c82a7a9b38ef53ed1d40a66ac417679bd 100644 (file)
@@ -2158,8 +2158,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                if (skb_dst(skb)) {
                        int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
 
-                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
-                                                      skb, mtu);
+                       skb_dst_update_pmtu(skb, mtu);
                }
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
@@ -2200,8 +2199,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                if (skb_dst(skb)) {
                        int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
 
-                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
-                                                      skb, mtu);
+                       skb_dst_update_pmtu(skb, mtu);
                }
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
index 33ac2b186b85eb1f4883d26d6d0d9b3a8532fc01..5727b186b3ca56b6d43249bc69eb7f962bd22308 100644 (file)
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
 {
        int i = 0;
        for (i = 0; i < d->nvqs; ++i)
-               mutex_lock(&d->vqs[i]->mutex);
+               mutex_lock_nested(&d->vqs[i]->mutex, i);
 }
 
 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
+               if (!dev->iotlb) {
+                       ret = -EFAULT;
+                       break;
+               }
                vhost_vq_meta_reset(dev);
                vhost_del_umem_range(dev->iotlb, msg->iova,
                                     msg->iova + msg->size - 1);
index 056276101c63a7060b09517353ba24cd00cde9a5..a6226cd6063c78dd0194c4c368698047da84b0a2 100644 (file)
@@ -1633,28 +1633,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
 int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
 {
-       struct btrfs_delayed_item *curr, *next;
-       int ret;
-
-       if (list_empty(del_list))
-               return 0;
+       struct btrfs_delayed_item *curr;
+       int ret = 0;
 
-       list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+       list_for_each_entry(curr, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;
-
-               list_del(&curr->readdir_list);
-               ret = (curr->key.offset == index);
-
-               if (refcount_dec_and_test(&curr->refs))
-                       kfree(curr);
-
-               if (ret)
-                       return 1;
-               else
-                       continue;
+               if (curr->key.offset == index) {
+                       ret = 1;
+                       break;
+               }
        }
-       return 0;
+       return ret;
 }
 
 /*
index f650e475d8f0d84af1bb3013b6b35ef77421cde2..fdf2aad7347090b7ceecd97a5a9eb3dfdfc1093a 100644 (file)
@@ -60,10 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
                                gi->gid[i] = exp->ex_anon_gid;
                        else
                                gi->gid[i] = rqgi->gid[i];
-
-                       /* Each thread allocates its own gi, no race */
-                       groups_sort(gi);
                }
+
+               /* Each thread allocates its own gi, no race */
+               groups_sort(gi);
        } else {
                gi = get_group_info(rqgi);
        }
index ded456f17de611c33aee001deab8729bfdfbabfe..c584ad8d023cde88514967d8b2b7f96d56a7a9f8 100644 (file)
@@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
        struct orangefs_kernel_op_s *op, *temp;
        __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
        static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
-       struct orangefs_kernel_op_s *cur_op = NULL;
+       struct orangefs_kernel_op_s *cur_op;
        unsigned long ret;
 
        /* We do not support blocking IO. */
@@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
                return -EAGAIN;
 
 restart:
+       cur_op = NULL;
        /* Get next op (if any) from top of list. */
        spin_lock(&orangefs_request_list_lock);
        list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
index 1668fd645c453609473f9d6ab89b30499300d940..0d228cd087e6275b8c5c6e1b0e7102afe5405400 100644 (file)
@@ -452,7 +452,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
 static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
-       loff_t pos = *(&iocb->ki_pos);
+       loff_t pos = iocb->ki_pos;
        ssize_t rc = 0;
 
        BUG_ON(iocb->private);
@@ -492,9 +492,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
                }
        }
 
-       if (file->f_pos > i_size_read(file->f_mapping->host))
-               orangefs_i_size_write(file->f_mapping->host, file->f_pos);
-
        rc = generic_write_checks(iocb, iter);
 
        if (rc <= 0) {
@@ -508,7 +505,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
         * pos to the end of the file, so we will wait till now to set
         * pos...
         */
-       pos = *(&iocb->ki_pos);
+       pos = iocb->ki_pos;
 
        rc = do_readv_writev(ORANGEFS_IO_WRITE,
                             file,
index 97adf7d100b5fe498f70345d1fcdfc525f1d2309..2595453fe7370ab078da275e9da48cdd18fa0e0f 100644 (file)
@@ -533,17 +533,6 @@ do {                                                                       \
        sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE;                  \
 } while (0)
 
-static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
-{
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-       inode_lock(inode);
-#endif
-       i_size_write(inode, i_size);
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-       inode_unlock(inode);
-#endif
-}
-
 static inline void orangefs_set_timeout(struct dentry *dentry)
 {
        unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
index 835c6e148afccbb75105d221f8d3034159b20b52..0577d6dba8c81dc531f9ed027258f077c8a96ace 100644 (file)
@@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
  */
 void purge_waiting_ops(void)
 {
-       struct orangefs_kernel_op_s *op;
+       struct orangefs_kernel_op_s *op, *tmp;
 
        spin_lock(&orangefs_request_list_lock);
-       list_for_each_entry(op, &orangefs_request_list, list) {
+       list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "pvfs2-client-core: purging op tag %llu %s\n",
                             llu(op->tag),
index 2bab81951ced732fb832f3b92cd2b71e840f9b78..3319df9727aa23744f17d56637d9a160d628541a 100644 (file)
@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
 
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
+
 bool is_ftrace_trampoline(unsigned long addr);
 
 /*
index 9c5a2628d6ce7b4f09e7542505c7b00ed0c8db8a..1d3877c39a000a6e5a4fb7647a38de51c5c47c50 100644 (file)
@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
 }
 
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+       return swp_offset(entry);
+}
+
 static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
        return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
        return false;
 }
 
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+       return 0;
+}
+
 static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
        return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
 }
 
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+       return swp_offset(entry);
+}
+
 static inline struct page *migration_entry_to_page(swp_entry_t entry)
 {
        struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
 {
        return 0;
 }
+
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+       return 0;
+}
+
 static inline struct page *migration_entry_to_page(swp_entry_t entry)
 {
        return NULL;
index 853291714ae0b9a11682c87815efe0abc2ce2377..bae807eb2933f9391d42c3d0dd73d98928b91a98 100644 (file)
 #else
 #define MODULE_RANDSTRUCT_PLUGIN
 #endif
-#ifdef RETPOLINE
-#define MODULE_VERMAGIC_RETPOLINE "retpoline "
-#else
-#define MODULE_VERMAGIC_RETPOLINE ""
-#endif
 
 #define VERMAGIC_STRING                                                \
        UTS_RELEASE " "                                                 \
        MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT                     \
        MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS       \
        MODULE_ARCH_VERMAGIC                                            \
-       MODULE_RANDSTRUCT_PLUGIN                                        \
-       MODULE_VERMAGIC_RETPOLINE
+       MODULE_RANDSTRUCT_PLUGIN
 
index b091fd536098918e425394c55c46aae33e923239..d49d607dd2b3c564afa640929e46bcfcdb7431f0 100644 (file)
@@ -521,4 +521,12 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
 }
 #endif
 
+static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (dst && dst->ops->update_pmtu)
+               dst->ops->update_pmtu(dst, NULL, skb, mtu);
+}
+
 #endif /* _NET_DST_H */
index f73797e2fa60c51a81c8d7a0e231bd2be0137119..221238254eb7837a1b57c6313b7a8f8fa83c0040 100644 (file)
@@ -331,6 +331,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
 int ip6_flowlabel_init(void);
 void ip6_flowlabel_cleanup(void);
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
 
 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 {
index 10f99dafd5acb16f2c477baeab912a730c410344..049008493faf67902365570a16f26233a1d9a6ca 100644 (file)
@@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2)
        return net1 == net2;
 }
 
+static inline int check_net(const struct net *net)
+{
+       return atomic_read(&net->count) != 0;
+}
+
 void net_drop_ns(void *);
 
 #else
@@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2)
        return 1;
 }
 
+static inline int check_net(const struct net *net)
+{
+       return 1;
+}
+
 #define net_drop_ns NULL
 #endif
 
index 8e08b6da72f325bd4a623191e886fb1b746644d7..753ac9361154be4ff9d05f5aa22473f8fb818ea0 100644 (file)
@@ -522,7 +522,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
 {
        switch (layer) {
                case TCF_LAYER_LINK:
-                       return skb->data;
+                       return skb_mac_header(skb);
                case TCF_LAYER_NETWORK:
                        return skb_network_header(skb);
                case TCF_LAYER_TRANSPORT:
index ccdf3664e4a9a7f6a79423dec807cd9f0c3ecd6f..554b517c61a04d8f976b7a10975d47327c2f7bee 100644 (file)
@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
 };
 
 /*
- * This is used by __kernel_text_address() to return true if the
- * address is on a dynamically allocated trampoline that would
- * not return true for either core_kernel_text() or
- * is_module_text_address().
+ * Used by the stack undwinder to know about dynamic ftrace trampolines.
  */
-bool is_ftrace_trampoline(unsigned long addr)
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
-       struct ftrace_ops *op;
-       bool ret = false;
+       struct ftrace_ops *op = NULL;
 
        /*
         * Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
                if (op->trampoline && op->trampoline_size)
                        if (addr >= op->trampoline &&
                            addr < op->trampoline + op->trampoline_size) {
-                               ret = true;
-                               goto out;
+                               preempt_enable_notrace();
+                               return op;
                        }
        } while_for_each_ftrace_op(op);
-
- out:
        preempt_enable_notrace();
 
-       return ret;
+       return NULL;
+}
+
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+       return ftrace_ops_trampoline(addr) != NULL;
 }
 
 struct ftrace_page {
index 2a8d8a294345a258baca50b8a6b272c1ac0fc658..8e3f20a18a06dae09a240f54df3e60bebf9e6382 100644 (file)
@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
+/*
+ * Skip 3:
+ *
+ *   trace_buffer_unlock_commit_regs()
+ *   trace_event_buffer_commit()
+ *   trace_event_raw_event_xxx()
+*/
+# define STACK_SKIP 3
+
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                     struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
        __buffer_unlock_commit(buffer, event);
 
        /*
-        * If regs is not set, then skip the following callers:
-        *   trace_buffer_unlock_commit_regs
-        *   event_trigger_unlock_commit
-        *   trace_event_buffer_commit
-        *   trace_event_raw_event_sched_switch
+        * If regs is not set, then skip the necessary functions.
         * Note, we can still get here via blktrace, wakeup tracer
         * and mmiotrace, but that's ok if they lose a function or
-        * two. They are that meaningful.
+        * two. They are not that meaningful.
         */
-       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
+       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
 }
 
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
        trace.skip              = skip;
 
        /*
-        * Add two, for this function and the call to save_stack_trace()
+        * Add one, for this function and the call to save_stack_trace()
         * If regs is set, then these functions will not be in the way.
         */
+#ifndef CONFIG_UNWINDER_ORC
        if (!regs)
-               trace.skip += 2;
+               trace.skip++;
+#endif
 
        /*
         * Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
 
        local_save_flags(flags);
 
-       /*
-        * Skip 3 more, seems to get us at the caller of
-        * this function.
-        */
-       skip += 3;
+#ifndef CONFIG_UNWINDER_ORC
+       /* Skip 1 to skip this function. */
+       skip++;
+#endif
        __ftrace_trace_stack(global_trace.trace_buffer.buffer,
                             flags, skip, preempt_count(), NULL);
 }
index f2ac9d44f6c4b1f0ea4128836d9977b138c840ca..87411482a46f2753685c9eab3d9e91830d7ae47d 100644 (file)
@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 #ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_UNWINDER_ORC
+/* Skip 2:
+ *   event_triggers_post_call()
+ *   trace_event_raw_event_xxx()
+ */
+# define STACK_SKIP 2
+#else
 /*
- * Skip 3:
+ * Skip 4:
  *   stacktrace_trigger()
  *   event_triggers_post_call()
+ *   trace_event_buffer_commit()
  *   trace_event_raw_event_xxx()
  */
-#define STACK_SKIP 3
+#define STACK_SKIP 4
+#endif
 
 static void
 stacktrace_trigger(struct event_trigger_data *data, void *rec)
index 27f7ad12c4b1b11da86dd161468c1c11944dd8fb..b611cd36e22db8ad79208e2ccb76bc643c6c5763 100644 (file)
@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
 }
 
+#ifdef CONFIG_UNWINDER_ORC
+/*
+ * Skip 2:
+ *
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 2
+#else
+/*
+ * Skip 3:
+ *   __trace_stack()
+ *   function_stack_trace_call()
+ *   ftrace_call()
+ */
+#define STACK_SKIP 3
+#endif
+
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
-               /*
-                * skip over 5 funcs:
-                *    __ftrace_trace_stack,
-                *    __trace_stack,
-                *    function_stack_trace_call
-                *    ftrace_list_func
-                *    ftrace_call
-                */
-               __trace_stack(tr, flags, 5, pc);
+               __trace_stack(tr, flags, STACK_SKIP, pc);
        }
 
        atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
        tracer_tracing_off(tr);
 }
 
+#ifdef CONFIG_UNWINDER_ORC
 /*
- * Skip 4:
+ * Skip 3:
+ *
+ *   function_trace_probe_call()
+ *   ftrace_ops_assist_func()
+ *   ftrace_call()
+ */
+#define FTRACE_STACK_SKIP 3
+#else
+/*
+ * Skip 5:
+ *
+ *   __trace_stack()
  *   ftrace_stacktrace()
  *   function_trace_probe_call()
- *   ftrace_ops_list_func()
+ *   ftrace_ops_assist_func()
  *   ftrace_call()
  */
-#define STACK_SKIP 4
+#define FTRACE_STACK_SKIP 5
+#endif
 
 static __always_inline void trace_stack(struct trace_array *tr)
 {
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
        local_save_flags(flags);
        pc = preempt_count();
 
-       __trace_stack(tr, flags, STACK_SKIP, pc);
+       __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
 }
 
 static void
index d22b84310f6d40903083521fb18a5e858f464d93..ae3c2a35d61b7cae4fbecea4421bbd0450b81c53 100644 (file)
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
        return true;
 }
 
+static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
+{
+       unsigned long hpage_pfn = page_to_pfn(hpage);
+
+       /* THP can be referenced by any subpage */
+       return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
+}
+
+/**
+ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pte() has to validate this.
+ *
+ * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
+ * page.
+ *
+ * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
+ * entry that points to @pvmw->page or any subpage in case of THP.
+ *
+ * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
+ * @pvmw->page or any subpage in case of THP.
+ *
+ * Otherwise, return false.
+ *
+ */
 static bool check_pte(struct page_vma_mapped_walk *pvmw)
 {
+       unsigned long pfn;
+
        if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 
                if (!is_migration_entry(entry))
                        return false;
-               if (migration_entry_to_page(entry) - pvmw->page >=
-                               hpage_nr_pages(pvmw->page)) {
-                       return false;
-               }
-               if (migration_entry_to_page(entry) < pvmw->page)
-                       return false;
-#else
-               WARN_ON_ONCE(1);
-#endif
-       } else {
-               if (is_swap_pte(*pvmw->pte)) {
-                       swp_entry_t entry;
 
-                       entry = pte_to_swp_entry(*pvmw->pte);
-                       if (is_device_private_entry(entry) &&
-                           device_private_entry_to_page(entry) == pvmw->page)
-                               return true;
-               }
+               pfn = migration_entry_to_pfn(entry);
+       } else if (is_swap_pte(*pvmw->pte)) {
+               swp_entry_t entry;
 
-               if (!pte_present(*pvmw->pte))
+               /* Handle un-addressable ZONE_DEVICE memory */
+               entry = pte_to_swp_entry(*pvmw->pte);
+               if (!is_device_private_entry(entry))
                        return false;
 
-               /* THP can be referenced by any subpage */
-               if (pte_page(*pvmw->pte) - pvmw->page >=
-                               hpage_nr_pages(pvmw->page)) {
-                       return false;
-               }
-               if (pte_page(*pvmw->pte) < pvmw->page)
+               pfn = device_private_entry_to_pfn(entry);
+       } else {
+               if (!pte_present(*pvmw->pte))
                        return false;
+
+               pfn = pte_pfn(*pvmw->pte);
        }
 
-       return true;
+       return pfn_in_hpage(pvmw->page, pfn);
 }
 
 /**
index 0e0ba36eeac9852b8df5ddd398dbc66ad6c83760..613fb4066be7bedbd3cd59ea9cf6eb7ef3100bd0 100644 (file)
@@ -3151,10 +3151,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
                hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
 
                /* + transport layer */
-               if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-                       hdr_len += tcp_hdrlen(skb);
-               else
-                       hdr_len += sizeof(struct udphdr);
+               if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+                       const struct tcphdr *th;
+                       struct tcphdr _tcphdr;
+
+                       th = skb_header_pointer(skb, skb_transport_offset(skb),
+                                               sizeof(_tcphdr), &_tcphdr);
+                       if (likely(th))
+                               hdr_len += __tcp_hdrlen(th);
+               } else {
+                       struct udphdr _udphdr;
+
+                       if (skb_header_pointer(skb, skb_transport_offset(skb),
+                                              sizeof(_udphdr), &_udphdr))
+                               hdr_len += sizeof(struct udphdr);
+               }
 
                if (shinfo->gso_type & SKB_GSO_DODGY)
                        gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
index 1c75cd1255f69ebf0b57afb742930b7244af11a0..92d016e87816e3bc40a629691d0db2a6915d2c28 100644 (file)
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 
        ccid2_pr_debug("RTO_EXPIRE\n");
 
+       if (sk->sk_state == DCCP_CLOSED)
+               goto out;
+
        /* back-off timer */
        hc->tx_rto <<= 1;
        if (hc->tx_rto > DCCP_RTO_MAX)
index b1338e576d00389db8b6ce2383550e9aa22a4399..29b333a62ab01d606301ae45a7ec5f198cf1a5a0 100644 (file)
@@ -122,6 +122,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
        if (!xo)
                goto out;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+               goto out;
+
        seq = xo->seq.low;
 
        x = skb->sp->xvec[skb->sp->len - 1];
index 726f6b6082748896686ae603546fa189348f9142..2d49717a742107eddd4ea31ce418d260146efe19 100644 (file)
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
                return htonl(INADDR_ANY);
 
        for_ifa(in_dev) {
-               if (inet_ifa_match(fl4->saddr, ifa))
+               if (fl4->saddr == ifa->ifa_local)
                        return fl4->saddr;
        } endfor_ifa(in_dev);
 
index 5ddb1cb52bd405ed10cce43195a25607d136efbf..6d21068f9b5531e34c0f8be180e2b835fdaae0d7 100644 (file)
@@ -520,8 +520,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
-       if (skb_dst(skb))
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       skb_dst_update_pmtu(skb, mtu);
 
        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
index 949f432a5f04b5005eb88ad6d1f031046265dc26..51b1669334fe6baeea0045fcfdd631700c1ccbf2 100644 (file)
@@ -200,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               skb_dst_update_pmtu(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
index f08eebe60446e2e99d19bd0ea51e5fafb96aca63..8e053ad7cae260c545b254e8164a1c4a0487db5e 100644 (file)
@@ -2298,6 +2298,9 @@ adjudge_to_death:
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPABORTONMEMORY);
+               } else if (!check_net(sock_net(sk))) {
+                       /* Not possible to send reset; just close */
+                       tcp_set_state(sk, TCP_CLOSE);
                }
        }
 
index b6a2aa1dcf56cbaa029b0ec6c5e512e33c90fff9..4d58e2ce0b5b181b39aeb12f8761ba016606dd43 100644 (file)
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
+               return ERR_PTR(-EINVAL);
+
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);
 
index 968fda1983762e6d7c078a28ccfcbd9066788daf..388158c9d9f6b326321e0cc67e9b54916938a601 100644 (file)
@@ -48,11 +48,19 @@ static void tcp_write_err(struct sock *sk)
  *  to prevent DoS attacks. It is called when a retransmission timeout
  *  or zero probe timeout occurs on orphaned socket.
  *
+ *  Also close if our net namespace is exiting; in that case there is no
+ *  hope of ever communicating again since all netns interfaces are already
+ *  down (or about to be down), and we need to release our dst references,
+ *  which have been moved to the netns loopback interface, so the namespace
+ *  can finish exiting.  This condition is only possible if we are a kernel
+ *  socket, as those do not hold references to the namespace.
+ *
  *  Criteria is still not confirmed experimentally and may change.
  *  We kill the socket, if:
  *  1. If number of orphaned sockets exceeds an administratively configured
  *     limit.
  *  2. If we have strong memory pressure.
+ *  3. If our net namespace is exiting.
  */
 static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 {
@@ -81,6 +89,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }
+
+       if (!check_net(sock_net(sk))) {
+               /* Not possible to send reset; just close */
+               tcp_done(sk);
+               return 1;
+       }
+
        return 0;
 }
 
index 01801b77bd0da45764fd0e9a80f22b0e46633934..ea6e6e7df0eec8e79f631851846cf6957014c162 100644 (file)
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                goto out;
        }
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+               goto out;
+
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto out;
 
index e6265e2c274e48a54cfc08afb9fee6c9fc2ad194..20ca486b3cadf34c2e4fa6df17d44a178fbe3ddc 100644 (file)
@@ -92,6 +92,7 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
+       eth_hdr(skb)->h_proto = skb->protocol;
 
        err = 0;
 
index dd9627490c7ca22bda9fe3cca13b72c60f6865de..f52c314d4c97086fba003709f506ec7e408baba8 100644 (file)
@@ -149,6 +149,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
        if (!xo)
                goto out;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
+               goto out;
+
        seq = xo->seq.low;
 
        x = skb->sp->xvec[skb->sp->len - 1];
index 4f7d8de56611472575862d50b9fd18f677d36e43..3763dc01e37477af36d4f1445d72d452e7528017 100644 (file)
@@ -166,7 +166,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
 {
        if (!np->autoflowlabel_set)
                return ip6_default_np_autolabel(net);
index 9a7cf355bc8c8ff67388456060c1f7e67d8762ee..1ee5584c3555b4758995af68144356315934efcc 100644 (file)
@@ -642,8 +642,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (rel_info > dst_mtu(skb_dst(skb2)))
                        goto out;
 
-               skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2,
-                                               rel_info);
+               skb_dst_update_pmtu(skb2, rel_info);
        }
 
        icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -1131,8 +1130,7 @@ route_lookup:
                mtu = 576;
        }
 
-       if (skb_dst(skb) && !t->parms.collect_md)
-               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       skb_dst_update_pmtu(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
index dbb74f3c57a77f6c0fd8c337b8e127246c2d9b4e..8c184f84f35334ebbc97cf3f249c5ae71ad6826d 100644 (file)
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
-               skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+               skb_dst_update_pmtu(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
index 2d4680e0376f41deee6c999eadaf9409353e0b4a..e8ffb5b5d84e626fdec748aeb2d3e284d6256db1 100644 (file)
@@ -1336,7 +1336,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                break;
 
        case IPV6_AUTOFLOWLABEL:
-               val = np->autoflowlabel;
+               val = ip6_autoflowlabel(sock_net(sk), np);
                break;
 
        case IPV6_RECVFRAGSIZE:
index d7dc23c1b2ca32fb554cccf1fbf50f736a7f6f4c..3873d387713575558801b0352227efd4c4ac45f6 100644 (file)
@@ -934,8 +934,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        df = 0;
                }
 
-               if (tunnel->parms.iph.daddr && skb_dst(skb))
-                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               if (tunnel->parms.iph.daddr)
+                       skb_dst_update_pmtu(skb, mtu);
 
                if (skb->len > mtu && !skb_is_gso(skb)) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
index d883c9204c01d525fcd51012c34afbf5f8af11ed..278e49cd67d4e2c7b0ab9138fabe84753d628b5a 100644 (file)
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
 {
        struct tcphdr *th;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
+               return ERR_PTR(-EINVAL);
+
        if (!pskb_may_pull(skb, sizeof(*th)))
                return ERR_PTR(-EINVAL);
 
index a0f89ad76f9d2233b9e048418069aacd92ac6a25..2a04dc9c781b5f236fbe6dc3804f5eeed174ee44 100644 (file)
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                const struct ipv6hdr *ipv6h;
                struct udphdr *uh;
 
+               if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
+                       goto out;
+
                if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                        goto out;
 
index 02556e356f87e94830e76bb6f4f519e4cf1eb4d7..dc93002ff9d1b88a9ac4ec750ae38f89c25dca73 100644 (file)
@@ -92,6 +92,7 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
+       eth_hdr(skb)->h_proto = skb->protocol;
 
        err = 0;
 
index d4e98f20fc2ac1c55a1f1db67498af900e0842ea..4a8d407f8902210b6edb1c9803877686687c4edb 100644 (file)
@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
        if (!csk)
                return -EINVAL;
 
-       /* We must prevent loops or risk deadlock ! */
-       if (csk->sk_family == PF_KCM)
+       /* Only allow TCP sockets to be attached for now */
+       if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
+           csk->sk_protocol != IPPROTO_TCP)
+               return -EOPNOTSUPP;
+
+       /* Don't allow listeners or closed sockets */
+       if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
                return -EOPNOTSUPP;
 
        psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
                return err;
        }
 
-       sock_hold(csk);
-
        write_lock_bh(&csk->sk_callback_lock);
+
+       /* Check if sk_user_data is aready by KCM or someone else.
+        * Must be done under lock to prevent race conditions.
+        */
+       if (csk->sk_user_data) {
+               write_unlock_bh(&csk->sk_callback_lock);
+               strp_done(&psock->strp);
+               kmem_cache_free(kcm_psockp, psock);
+               return -EALREADY;
+       }
+
        psock->save_data_ready = csk->sk_data_ready;
        psock->save_write_space = csk->sk_write_space;
        psock->save_state_change = csk->sk_state_change;
@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
        csk->sk_data_ready = psock_data_ready;
        csk->sk_write_space = psock_write_space;
        csk->sk_state_change = psock_state_change;
+
        write_unlock_bh(&csk->sk_callback_lock);
 
+       sock_hold(csk);
+
        /* Finished initialization, now add the psock to the MUX. */
        spin_lock_bh(&mux->lock);
        head = &mux->psocks;
index 6b7ee71f40c63e879dc46d7bed025aac3def2c1f..ab7356e0ba8334ffa2b9cac5ca888ef8bbc48d3c 100644 (file)
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
                              sizeof(val));
 }
 
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
 {
-       return tcp_sk(tc->t_sock->sk)->snd_nxt;
+       /* seq# of the last byte of data in tcp send buffer */
+       return tcp_sk(tc->t_sock->sk)->write_seq;
 }
 
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
index 1aafbf7c30114155daec203fe8e0980249853ae9..864ca7d8f01942ef84325e7dcf4d0d330c7e4ef3 100644 (file)
@@ -54,7 +54,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc);
-u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
index dc860d1bb6088929591bb670ef96079de7c1819c..9b76e0fa1722407b14d39d1bfc7e955f99964e67 100644 (file)
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                 * m_ack_seq is set to the sequence number of the last byte of
                 * header and data.  see rds_tcp_is_acked().
                 */
-               tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
+               tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
                rm->m_ack_seq = tc->t_last_sent_nxt +
                                sizeof(struct rds_header) +
                                be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                        rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
 
                rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
-                        rm, rds_tcp_snd_nxt(tc),
+                        rm, rds_tcp_write_seq(tc),
                         (unsigned long long)rm->m_ack_seq);
        }
 
index df3110d695857e672f1a50f83399a7f63959dd00..07c10bac06a079380576c0d025c397fea990b2ec 100644 (file)
@@ -51,7 +51,7 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
        if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
                return 0;
 
-       return !memcmp(ptr + nbyte->hdr.off, nbyte->pattern, nbyte->hdr.len);
+       return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
 }
 
 static struct tcf_ematch_ops em_nbyte_ops = {
index 275925b93b2909a32e9a5ab83a16e9b0f5717e24..35bc7106d1827a80f1135fe4797bec36cb7c669a 100644 (file)
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct sctphdr *sh;
 
+       if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+               goto out;
+
        sh = sctp_hdr(skb);
        if (!pskb_may_pull(skb, sizeof(*sh)))
                goto out;
index 61f394d369bf86d69c44b84950632cd5c3519905..0a9b72fbd761f9c69ba63f651d971c228a70b948 100644 (file)
@@ -577,6 +577,8 @@ alloc_payload:
                get_page(page);
                sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
                sg_set_page(sg, page, copy, offset);
+               sg_unmark_end(sg);
+
                ctx->sg_plaintext_num_elem++;
 
                sk_mem_charge(sk, copy);
index 5d28abf87fbfe794e44612802d3304e8cf331592..c9473d698525a3862d6f8eda3f149b9934171795 100644 (file)
@@ -951,7 +951,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
                 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
                 * but local send is not shutdown.
                 */
-               if (sk->sk_state == TCP_CLOSE) {
+               if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                mask |= POLLOUT | POLLWRNORM;
 
index 30e5746085b8fcfc5aa8abc7a8a23753c510a630..ac9477189d1cc2e9013af88d549fa075144d4e8c 100644 (file)
@@ -102,6 +102,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 
        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
+               xso->dev = NULL;
                dev_put(dev);
                return err;
        }
index 42995741263386852b61a6a72a23669f4d134b5b..a3785f538018dc8a777f7768752d8f5f7cedb1e1 100644 (file)
@@ -317,7 +317,7 @@ retry:
 
        if (!type && try_load) {
                request_module("xfrm-offload-%d-%d", family, proto);
-               try_load = 0;
+               try_load = false;
                goto retry;
        }
 
@@ -2272,8 +2272,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
                        goto error;
        }
 
-       x->km.state = XFRM_STATE_VALID;
-
 error:
        return err;
 }
@@ -2282,7 +2280,13 @@ EXPORT_SYMBOL(__xfrm_init_state);
 
 int xfrm_init_state(struct xfrm_state *x)
 {
-       return __xfrm_init_state(x, true, false);
+       int err;
+
+       err = __xfrm_init_state(x, true, false);
+       if (!err)
+               x->km.state = XFRM_STATE_VALID;
+
+       return err;
 }
 
 EXPORT_SYMBOL(xfrm_init_state);
index bdb48e5dba0480aa4c3c6855be42cfb93f3bf335..7f52b8eb177db4978750caed402aa34f6a31d7b4 100644 (file)
@@ -598,13 +598,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
                        goto error;
        }
 
-       if (attrs[XFRMA_OFFLOAD_DEV]) {
-               err = xfrm_dev_state_add(net, x,
-                                        nla_data(attrs[XFRMA_OFFLOAD_DEV]));
-               if (err)
-                       goto error;
-       }
-
        if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
                                               attrs[XFRMA_REPLAY_ESN_VAL])))
                goto error;
@@ -620,6 +613,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
        /* override default values from above */
        xfrm_update_ae_params(x, attrs, 0);
 
+       /* configure the hardware if offload is requested */
+       if (attrs[XFRMA_OFFLOAD_DEV]) {
+               err = xfrm_dev_state_add(net, x,
+                                        nla_data(attrs[XFRMA_OFFLOAD_DEV]));
+               if (err)
+                       goto error;
+       }
+
        return x;
 
 error:
@@ -662,6 +663,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
        }
 
+       if (x->km.state == XFRM_STATE_VOID)
+               x->km.state = XFRM_STATE_VALID;
+
        c.seq = nlh->nlmsg_seq;
        c.portid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;