]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
authorDavid S. Miller <davem@davemloft.net>
Mon, 15 Jun 2015 21:30:32 +0000 (14:30 -0700)
committerDavid S. Miller <davem@davemloft.net>
Mon, 15 Jun 2015 21:30:32 +0000 (14:30 -0700)
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

This is a bit large (and late) patchset that contains Netfilter updates for
net-next. Most relevantly br_netfilter fixes, ipset RCU support, removal of
x_tables percpu ruleset copy and rework of the nf_tables netdev support. More
specifically, they are:

1) Warn the user when there is a better protocol conntracker available, from
   Marcelo Ricardo Leitner.

2) Fix forwarding of IPv6 fragmented traffic in br_netfilter, from Bernhard
   Thaler. This comes with several patches to prepare the change in first place.

3) Get rid of special mtu handling of PPPoE/VLAN frames for br_netfilter. This
   is not needed anymore since now we use the largest fragment size to
   refragment, from Florian Westphal.

4) Restore vlan tag when refragmenting in br_netfilter, also from Florian.

5) Get rid of the percpu ruleset copy in x_tables, from Florian. Plus another
   follow up patch to refine it from Eric Dumazet.

6) Several ipset cleanups, fixes and finally RCU support, from Jozsef Kadlecsik.

7) Get rid of parens in Netfilter Kconfig files.

8) Attach the net_device to the basechain as opposed to the initial per table
   approach in the nf_tables netdev family.

9) Subscribe to netdev events to detect the removal and registration of a
   device that is referenced by a basechain.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
51 files changed:
Documentation/kernel-parameters.txt
Documentation/networking/udplite.txt
arch/blackfin/include/asm/io.h
arch/score/lib/string.S
arch/x86/kvm/mmu.c
block/blk-mq.c
block/genhd.c
drivers/block/Kconfig
drivers/block/zram/zram_drv.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/input/mouse/synaptics.c
drivers/iommu/intel-iommu.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/liquidio_image.h
drivers/net/ethernet/cavium/liquidio/octeon_device.c
drivers/net/ethernet/cavium/liquidio/octeon_droq.c
drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_rq.c
drivers/net/ethernet/intel/igb/igb_ptp.c
include/linux/intel-iommu.h
include/linux/skbuff.h
kernel/sched/fair.c
kernel/trace/ring_buffer_benchmark.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/zsmalloc.c
net/bridge/br_multicast.c
net/core/flow_dissector.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/ip_output.c
net/ipv4/tcp_cdg.c
net/ipv6/ip6_input.c
net/ipv6/tcp_ipv6.c
net/mpls/af_mpls.c
net/sctp/auth.c
net/tipc/socket.c
net/wireless/wext-compat.c
scripts/checkpatch.pl

index 61ab1628a057cc2c4d8b11d892d834f7e5f7773a..6726139bd2899038e77ae15f9901351773dd324b 100644 (file)
@@ -1481,6 +1481,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        By default, super page will be supported if Intel IOMMU
                        has the capability. With this option, super page will
                        not be supported.
+               ecs_off [Default Off]
+                       By default, extended context tables will be supported if
+                       the hardware advertises that it has support both for the
+                       extended tables themselves, and also PASID support. With
+                       this option set, extended tables will not be used even
+                       on hardware which claims to support them.
 
        intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
                        0       disables intel_idle and fall back on acpi_idle.
index d727a38291005f962848ed40a1ab11db4c167899..53a726855e49bfa4c313e46e15df1eec7cb610ae 100644 (file)
@@ -20,7 +20,7 @@
        files/UDP-Lite-HOWTO.txt
 
    o The Wireshark UDP-Lite WiKi (with capture files):
-       http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
+       https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
 
    o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt
 
index 4e8ad0523118d631ea24f6b9f8fd1c3ffb123194..6abebe82d4e93ed0329f271cd54e2af5c1bc38d2 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <asm/def_LPBlackfin.h>
 
 #define __raw_readb bfin_read8
 #define __raw_readw bfin_read16
index 00b7d3a2fc60681253eb2e1c1b874e48bbd02a4a..16efa3ad037f7cffbdbb4a5ffcf57a5d25325648 100644 (file)
@@ -175,10 +175,10 @@ ENTRY(__clear_user)
        br      r3
 
        .section .fixup, "ax"
+99:
        br      r3
        .previous
        .section __ex_table, "a"
        .align  2
-99:
        .word   0b, 99b
        .previous
index 44a7d25154973437e0ce4233e142d01c43a948b9..b73337634214c209e250051cd21e00bd2436fcd6 100644 (file)
@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        u64 entry, gentry, *spte;
        int npte;
        bool remote_flush, local_flush, zap_page;
-       union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
-               .cr0_wp = 1,
-               .cr4_pae = 1,
-               .nxe = 1,
-               .smep_andnot_wp = 1,
-               .smap_andnot_wp = 1,
-       };
+       union kvm_mmu_page_role mask = { };
+
+       mask.cr0_wp = 1;
+       mask.cr4_pae = 1;
+       mask.nxe = 1;
+       mask.smep_andnot_wp = 1;
+       mask.smap_andnot_wp = 1;
 
        /*
         * If we don't have indirect shadow pages, it means no page is
index e68b71b85a7eaf0e3097debe8bf4dc4078e7a038..594eea04266e6d05f7256255552a1c4c72c664f3 100644 (file)
@@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
        return NOTIFY_OK;
 }
 
+/* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 
        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
        blk_free_flush_queue(hctx->fq);
-       kfree(hctx->ctxs);
        blk_mq_free_bitmap(&hctx->ctx_map);
 }
 
@@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q)
        unsigned int i;
 
        /* hctx kobj stays in hctx */
-       queue_for_each_hw_ctx(q, hctx, i)
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (!hctx)
+                       continue;
+               kfree(hctx->ctxs);
                kfree(hctx);
+       }
 
        kfree(q->queue_hw_ctx);
 
index 666e11b8398378d145552b18e51c190c18847a9f..ea982eadaf6380b974d6b1d39a7197085217ac91 100644 (file)
@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        /* allocate ext devt */
        idr_preload(GFP_KERNEL);
 
-       spin_lock(&ext_devt_lock);
+       spin_lock_bh(&ext_devt_lock);
        idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-       spin_unlock(&ext_devt_lock);
+       spin_unlock_bh(&ext_devt_lock);
 
        idr_preload_end();
        if (idx < 0)
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
 }
 
@@ -690,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
 
        return disk;
index eb1fed5bd516ffac33c850eed47fad402250c686..3ccef9eba6f9dc53cecb785c23582cbdeb3b8618 100644 (file)
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
 
 config BLK_DEV_PMEM
        tristate "Persistent memory block device support"
+       depends on HAS_IOMEM
        help
          Saying Y here will allow you to use a contiguous range of reserved
          memory as one or more persistent block devices.
index 8dcbced0eafd5f8dc0a53dc8d8e9d4b37bad9bab..6e134f4759c0c9e98b93f221e7687004d4418342 100644 (file)
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;
        zram->max_comp_streams = 1;
+
        set_capacity(zram->disk, 0);
+       part_stat_set_all(&zram->disk->part0, 0);
 
        up_write(&zram->init_lock);
        /* I/O operation under all of CPU are done so let's free */
index 56e437e3158021a09641d188affc6129f0b1eda8..ae628001fd97873b67f99fb0128167858948afe6 100644 (file)
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
                                               struct intel_gmbus,
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
-       int i, reg_offset;
+       int i = 0, inc, try = 0, reg_offset;
        int ret = 0;
 
        intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
 
        reg_offset = dev_priv->gpio_mmio_base;
 
+retry:
        I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
-       for (i = 0; i < num; i++) {
+       for (; i < num; i += inc) {
+               inc = 1;
                if (gmbus_is_index_read(msgs, i, num)) {
                        ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
-                       i += 1;  /* set i to the index of the read xfer */
+                       inc = 2; /* an index read is two msgs */
                } else if (msgs[i].flags & I2C_M_RD) {
                        ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
                } else {
@@ -525,6 +527,18 @@ clear_err:
                         adapter->name, msgs[i].addr,
                         (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
+       /*
+        * Passive adapters sometimes NAK the first probe. Retry the first
+        * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+        * has retries internally. See also the retry loop in
+        * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+        */
+       if (ret == -ENXIO && i == 0 && try++ == 0) {
+               DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+                             adapter->name);
+               goto retry;
+       }
+
        goto out;
 
 timeout:
index e87d2f418de4f381d50471494e5fe8050de4bdb2..987b81f31b0e693cfe7d505b2f66eecc7eac6539 100644 (file)
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 
        DRM_DEBUG_KMS("initialising analog device %d\n", device);
 
-       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+       intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
                return false;
 
index e597ffc265633ef7439b2247301333a3affaeaea..dac78ad24b31558aa53d917fb802865b6a122b61 100644 (file)
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                else
                        radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-               /* if there is no audio, set MINM_OVER_MAXP  */
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
-                       radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                if (rdev->family < CHIP_RV770)
                        radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                /* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
                        if ((crtc->mode.clock == test_crtc->mode.clock) &&
                            (adjusted_clock == test_adjusted_clock) &&
                            (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
-                           (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
-                           (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
-                            drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
+                           (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
                                return test_radeon_crtc->pll_id;
                }
        }
index b7ca4c51462120fab3ab146dd74f653e8bcb91cb..a7fdfa4f0857b3a416e67d79007a1da731455b80 100644 (file)
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
+       /*
+        * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
+        * after the CP ring have chew one packet at least. Hence here we stop
+        * and restart DPM after the radeon_ib_ring_tests().
+        */
+       if (rdev->pm.dpm_enabled &&
+           (rdev->pm.pm_method == PM_METHOD_DPM) &&
+           (rdev->family == CHIP_TURKS) &&
+           (rdev->flags & RADEON_IS_MOBILITY)) {
+               mutex_lock(&rdev->pm.mutex);
+               radeon_dpm_disable(rdev);
+               radeon_dpm_enable(rdev);
+               mutex_unlock(&rdev->pm.mutex);
+       }
+
        if ((radeon_testing & 1)) {
                if (rdev->accel_working)
                        radeon_test_moves(rdev);
index de42fc4a22b869296ff44c85c859678c6155ddd7..9c3377ca17b75ecd2092e4fd78a2238c126d88f1 100644 (file)
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                /* make sure object fit at this offset */
                eoffset = soffset + size;
                if (soffset >= eoffset) {
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
 
                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
                if (last_pfn > rdev->vm_manager.max_pfn) {
                        dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                                last_pfn, rdev->vm_manager.max_pfn);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
 
        } else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                                "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
                                soffset, tmp->bo, tmp->it.start, tmp->it.last);
                        mutex_unlock(&vm->mutex);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
        }
 
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
                        if (!tmp) {
                                mutex_unlock(&vm->mutex);
-                               return -ENOMEM;
+                               r = -ENOMEM;
+                               goto error_unreserve;
                        }
                        tmp->it.start = bo_va->it.start;
                        tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                r = radeon_vm_clear_bo(rdev, pt);
                if (r) {
                        radeon_bo_unref(&pt);
-                       radeon_bo_reserve(bo_va->bo, false);
                        return r;
                }
 
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 
        mutex_unlock(&vm->mutex);
        return 0;
+
+error_unreserve:
+       radeon_bo_unreserve(bo_va->bo);
+       return r;
 }
 
 /**
index 630af73e98c488a5e266e4ccb6eed5dba622f3d3..35c8d0ceabeebf989b8eeff5cd54ee8f3ac2e247 100644 (file)
@@ -150,6 +150,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                {ANY_BOARD_ID, 2961},
                1024, 5112, 2024, 4832
        },
+       {
+               (const char * const []){"LEN2000", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
+               1024, 5113, 2021, 4832
+       },
        {
                (const char * const []){"LEN2001", NULL},
                {ANY_BOARD_ID, ANY_BOARD_ID},
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0045",
        "LEN0047",
        "LEN0049",
-       "LEN2000",
+       "LEN2000", /* S540 */
        "LEN2001", /* Edge E431 */
        "LEN2002", /* Edge E531 */
        "LEN2003",
index 2ffe589699448543c27a8f29d5274d3107b3c6e7..5ecfaf29933ad4634e2124544e3c800b9b309d44 100644 (file)
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations â€” the ones with
+ * PASID support on bit 28 â€” have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+                           ecap_pasid(iommu->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: disable supported super page\n");
                        intel_iommu_superpage = 0;
+               } else if (!strncmp(str, "ecs_off", 7)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable extended context table support\n");
+                       intel_iommu_ecs = 0;
                }
 
                str += strcspn(str, ",");
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        struct context_entry *context;
        u64 *entry;
 
-       if (ecap_ecs(iommu->ecap)) {
+       if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
@@ -806,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
                if (context)
                        free_pgtable_page(context);
 
-               if (!ecap_ecs(iommu->ecap))
+               if (!ecs_enabled(iommu))
                        continue;
 
                context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1141,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
        unsigned long flag;
 
        addr = virt_to_phys(iommu->root_entry);
-       if (ecap_ecs(iommu->ecap))
+       if (ecs_enabled(iommu))
                addr |= DMA_RTADDR_RTT;
 
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
index 27506302eb7aa42557bfc01547274957ccbace50..4dbed4a67aaf40e3c04bde925870c24d13cd1b4e 100644 (file)
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                                err = -EBUSY;
                }
                spin_unlock(&mddev->lock);
-               return err;
+               return err ?: len;
        }
        err = mddev_lock(mddev);
        if (err)
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                else
                        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-               flush_workqueue(md_misc_wq);
-               if (mddev->sync_thread) {
-                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       if (mddev_lock(mddev) == 0) {
+               if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+                   mddev_lock(mddev) == 0) {
+                       flush_workqueue(md_misc_wq);
+                       if (mddev->sync_thread) {
+                               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                                md_reap_sync_thread(mddev);
-                               mddev_unlock(mddev);
                        }
+                       mddev_unlock(mddev);
                }
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
        if (mddev_is_clustered(mddev))
                md_cluster_ops->metadata_update_finish(mddev);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
index e793ab6b35705e0ed1ad6904ebe9353b6dbf6fd6..f55c3f35b7463141086afb727785c775c5185d76 100644 (file)
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 
index 553d54b870528f0917e7518a9a783a28636d884a..b6793d2e051f3b278405f236e6623980bcdf1d04 100644 (file)
@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
index c7d8674dbe75724a5ca3cea8f4ea5cf496cc6673..5e7a0e270d547d5ebac9c176f48fc4d1c7e09cf2 100644 (file)
@@ -43,6 +43,7 @@ config        THUNDER_NIC_BGX
 
 config LIQUIDIO
        tristate "Cavium LiquidIO support"
+       depends on 64BIT
        select PTP_1588_CLOCK
        select FW_LOADER
        select LIBCRC32
index d23f494e258236b40c1a186ded01b94d668cad65..8ad7425f89bffc31e79ed31ea52a2822f8d47a82 100644 (file)
@@ -537,7 +537,7 @@ void lio_cn6xxx_disable_interrupt(void *chip)
        mmiowb();
 }
 
-void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
+static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
 {
        /* CN63xx Pass2 and newer parts implements the SLI_MAC_NUMBER register
         * to determine the PCIE port #
index c75f517379979420a2d400d6584736229642c2fa..0660deecc2c9a77f88e4eb9a0dbe02a68d0fec86 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/ipv6.h>
 #include <linux/net_tstamp.h>
 #include <linux/if_vlan.h>
@@ -210,7 +211,7 @@ static int liquidio_probe(struct pci_dev *pdev,
 static struct handshake handshake[MAX_OCTEON_DEVICES];
 static struct completion first_stage;
 
-void octeon_droq_bh(unsigned long pdev)
+static void octeon_droq_bh(unsigned long pdev)
 {
        int q_no;
        int reschedule = 0;
@@ -230,7 +231,7 @@ void octeon_droq_bh(unsigned long pdev)
                tasklet_schedule(&oct_priv->droq_tasklet);
 }
 
-int lio_wait_for_oq_pkts(struct octeon_device *oct)
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
 {
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
@@ -2615,7 +2616,7 @@ static inline int is_ip_fragmented(struct sk_buff *skb)
         * with more to follow; the current offset could be 0 ).
         * -  ths offset field is non-zero.
         */
-       return htons(ip_hdr(skb)->frag_off) & 0x3fff;
+       return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
 }
 
 static inline int is_ipv6(struct sk_buff *skb)
@@ -3080,7 +3081,7 @@ static int __init liquidio_init(void)
        return 0;
 }
 
-int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
 {
        struct octeon_device *oct = (struct octeon_device *)buf;
        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
index 13c680bf0556068a30b971b7b56b682633cdd615..93819bd8602b66e925c0e7f2ed1f155d801919bc 100644 (file)
@@ -35,9 +35,9 @@
 #define LIO_MAX_IMAGES 16
 #define LIO_NIC_MAGIC 0x434E4943     /* "CNIC" */
 struct octeon_firmware_desc {
-       u64 addr;
-       u32 len;
-       u32 crc32;         /* crc32 of image */
+       __be64 addr;
+       __be32 len;
+       __be32 crc32;         /* crc32 of image */
 };
 
 /* Following the header is a list of 64-bit aligned binary images,
@@ -45,13 +45,13 @@ struct octeon_firmware_desc {
  * Numeric fields are in network byte order.
  */
 struct octeon_firmware_file_header {
-       u32 magic;
+       __be32 magic;
        char version[LIO_MAX_FIRMWARE_VERSION_LEN];
        char bootcmd[LIO_MAX_BOOTCMD_LEN];
-       u32 num_images;
+       __be32 num_images;
        struct octeon_firmware_desc desc[LIO_MAX_IMAGES];
-       u32 pad;
-       u32 crc32;         /* header checksum */
+       __be32 pad;
+       __be32 crc32;         /* header checksum */
 };
 
 #endif /* _LIQUIDIO_IMAGE_H_ */
index 2ca91657295f13643935d91b94694d16c57f2746..0d3106b464b29548ddca47eaebb56d08e879b552 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/crc32.h>
 #include <linux/kthread.h>
 #include <linux/netdevice.h>
+#include <linux/vmalloc.h>
 #include "octeon_config.h"
 #include "liquidio_common.h"
 #include "octeon_droq.h"
@@ -463,7 +464,7 @@ static u32 octeon_device_count;
 
 static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
 
-void oct_set_config_info(int oct_id, int conf_type)
+static void oct_set_config_info(int oct_id, int conf_type)
 {
        if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
                conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
@@ -570,7 +571,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
 
        h = (struct octeon_firmware_file_header *)data;
 
-       if (h->magic != be32_to_cpu(LIO_NIC_MAGIC)) {
+       if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
                dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
                return -EINVAL;
        }
@@ -1108,11 +1109,12 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
                (u32)recv_pkt->rh.r_core_drv_init.app_mode),
                sizeof(app_name) - 1);
        oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
-       if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP)
+       if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
                oct->fw_info.max_nic_ports =
                        (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
                oct->fw_info.num_gmx_ports =
                        (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
+       }
 
        if (oct->fw_info.max_nic_ports < num_nic_ports) {
                dev_err(&oct->pci_dev->dev,
index 60a186f1609b250f7c506c057e0f49390bfada18..94b502a0cf33e54b954768688c3e35c056ed1d55 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/netdevice.h>
+#include <linux/vmalloc.h>
 #include "octeon_config.h"
 #include "liquidio_common.h"
 #include "octeon_droq.h"
index 9c0fd6100405683520c9f335f736f5b48dc871b3..5aecef87037740bb19a33fc6a61ce699e775bfec 100644 (file)
@@ -174,7 +174,7 @@ octeon_pci_write_core_mem(struct octeon_device *oct,
 
 u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
 {
-       u64 ret;
+       __be64 ret;
 
        __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
 
@@ -183,7 +183,7 @@ u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
 
 u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
 {
-       u32 ret;
+       __be32 ret;
 
        __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
 
@@ -193,7 +193,7 @@ u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
 void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
                               u32 val)
 {
-       u32 t = cpu_to_be32(val);
+       __be32 t = cpu_to_be32(val);
 
        __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
 }
index adb4284634956faf64b508d649375bf0e879061f..356796bf9b871e82f4e300206e24757072bfa870 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/netdevice.h>
+#include <linux/vmalloc.h>
 #include "octeon_config.h"
 #include "liquidio_common.h"
 #include "octeon_droq.h"
@@ -453,9 +454,10 @@ update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
        if (iq->flush_index != iq->octeon_read_index)
                inst_processed = lio_process_iq_request_list(oct, iq);
 
-       if (inst_processed)
+       if (inst_processed) {
                atomic_sub(inst_processed, &iq->instr_pending);
                iq->stats.instr_processed += inst_processed;
+       }
 }
 
 static void
index 73874b2575bf1e85983feee3fa4b67ff67431d8c..f3f1601a76f37ffe7886da67427f6b79e8435321 100644 (file)
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_devcmd_fw_info *fw_info;
+       int err;
 
-       enic_dev_fw_info(enic, &fw_info);
+       err = enic_dev_fw_info(enic, &fw_info);
+       /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+        * For other failures, like devcmd failure, we return previously
+        * recorded info.
+        */
+       if (err == -ENOMEM)
+               return;
 
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *vstats;
        unsigned int i;
-
-       enic_dev_stats_dump(enic, &vstats);
+       int err;
+
+       err = enic_dev_stats_dump(enic, &vstats);
+       /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+        * For other failures, like devcmd failure, we return previously
+        * recorded stats.
+        */
+       if (err == -ENOMEM)
+               return;
 
        for (i = 0; i < enic_n_tx_stats; i++)
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
index 204bd182473bceaaabaa5b1eba5ed618de751808..eadae1b412c652974dde24a9a76c5d74a8c3fa29 100644 (file)
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;
+       int err;
 
-       enic_dev_stats_dump(enic, &stats);
+       err = enic_dev_stats_dump(enic, &stats);
+       /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+        * For other failures, like devcmd failure, we return previously
+        * recorded stats.
+        */
+       if (err == -ENOMEM)
+               return net_stats;
 
        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
+       enic_poll_unlock_napi(&enic->rq[rq]);
        if (work_done < work_to_do) {
 
                /* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
-       enic_poll_unlock_napi(&enic->rq[rq]);
 
        return work_done;
 }
index 36a2ed606c911f21355360fad81eb39b18162c59..c4b2183bf352fb2a1881001777df91857c2d1f79 100644 (file)
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
        struct vnic_rq_buf *buf;
        u32 fetch_index;
        unsigned int count = rq->ring.desc_count;
+       int i;
 
        buf = rq->to_clean;
 
-       while (vnic_rq_desc_used(rq) > 0) {
-
+       for (i = 0; i < rq->ring.desc_count; i++) {
                (*buf_clean)(rq, buf);
-
-               buf = rq->to_clean = buf->next;
-               rq->ring.desc_avail++;
+               buf = buf->next;
        }
+       rq->ring.desc_avail = rq->ring.desc_count - 1;
 
        /* Use current fetch_index as the ring starting point */
        fetch_index = ioread32(&rq->ctrl->fetch_index);
index e3b9b63ad01083cb987429f57c9ebef84d86f4db..c3a9392cbc192229f4178c913fad8ab64d8c44c3 100644 (file)
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                        igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
                        igb->perout[i].period.tv_sec = ts.tv_sec;
                        igb->perout[i].period.tv_nsec = ts.tv_nsec;
-                       wr32(trgttiml, rq->perout.start.sec);
-                       wr32(trgttimh, rq->perout.start.nsec);
+                       wr32(trgttimh, rq->perout.start.sec);
+                       wr32(trgttiml, rq->perout.start.nsec);
                        tsauxc |= tsauxc_mask;
                        tsim |= tsim_mask;
                } else {
index 796ef9645827f000cb76ce4cd8637dc6cfa4db7a..a240e61a7700da6ca9d0f751967829b9ba47676d 100644 (file)
@@ -115,13 +115,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
  * Extended Capability Register
  */
 
+#define ecap_pasid(e)          ((e >> 40) & 0x1)
 #define ecap_pss(e)            ((e >> 35) & 0x1f)
 #define ecap_eafs(e)           ((e >> 34) & 0x1)
 #define ecap_nwfs(e)           ((e >> 33) & 0x1)
 #define ecap_srs(e)            ((e >> 31) & 0x1)
 #define ecap_ers(e)            ((e >> 30) & 0x1)
 #define ecap_prs(e)            ((e >> 29) & 0x1)
-#define ecap_pasid(e)          ((e >> 28) & 0x1)
+/* PASID support used to be on bit 28 */
 #define ecap_dis(e)            ((e >> 27) & 0x1)
 #define ecap_nest(e)           ((e >> 26) & 0x1)
 #define ecap_mts(e)            ((e >> 25) & 0x1)
index 32b105e682b3fef890a0c2318f144203df218db0..d6cdd6e87d53bcd1b4f390f61f73b1c91b076bdd 100644 (file)
@@ -2748,8 +2748,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
                    __wsum csum);
 
-static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
-                                        int len, void *data, int hlen, void *buffer)
+static inline void * __must_check
+__skb_header_pointer(const struct sk_buff *skb, int offset,
+                    int len, void *data, int hlen, void *buffer)
 {
        if (hlen - offset >= len)
                return data + offset;
@@ -2761,8 +2762,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
        return buffer;
 }
 
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
-                                      int len, void *buffer)
+static inline void * __must_check
+skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
 {
        return __skb_header_pointer(skb, offset, len, skb->data,
                                    skb_headlen(skb), buffer);
index ffeaa4105e48a36105ecaea8967082e1e7a7af98..c2980e8733bcb9da333f5d06e85441a78f1097a8 100644 (file)
@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
        }
        for (; vma; vma = vma->vm_next) {
                if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-                       is_vm_hugetlb_page(vma)) {
+                       is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
                        continue;
                }
 
index 13d945c0d03f2bda5802971484b21bbe9f65301f..1b28df2d91042de97566454a80dcb36d24674a49 100644 (file)
@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
 
        if (producer_fifo >= 0) {
                struct sched_param param = {
-                       .sched_priority = consumer_fifo
+                       .sched_priority = producer_fifo
                };
                sched_setscheduler(producer, SCHED_FIFO, &param);
        } else
index 14c2f2017e37cc405e52cb12bc30b128997f1f8e..a04225d372ba3ab77516b970c10135b19def3ac4 100644 (file)
@@ -2323,6 +2323,8 @@ done_restock:
        css_get_many(&memcg->css, batch);
        if (batch > nr_pages)
                refill_stock(memcg, batch - nr_pages);
+       if (!(gfp_mask & __GFP_WAIT))
+               goto done;
        /*
         * If the hierarchy is above the normal consumption range,
         * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
 
-       /* XXX: caller holds IRQ-safe mapping->tree_lock */
-       VM_BUG_ON(!irqs_disabled());
-
+       /* Caller disabled preemption with mapping->tree_lock */
        mem_cgroup_charge_statistics(memcg, page, -1);
        memcg_check_events(memcg, page);
 }
index 457bde530cbedcf0dea2f35e219466de0acf204d..9e88f749aa512395daea45f2727545fa0f281533 100644 (file)
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
                 * wait_table may be allocated from boot memory,
                 * here only free if it's allocated by vmalloc.
                 */
-               if (is_vmalloc_addr(zone->wait_table))
+               if (is_vmalloc_addr(zone->wait_table)) {
                        vfree(zone->wait_table);
+                       zone->wait_table = NULL;
+               }
        }
 }
 EXPORT_SYMBOL(try_offline_node);
index 08bd7a3d464a9c6959a39e269d2284600e750a50..a8b5e749e84e7dbd50d325eecf84a47316145598 100644 (file)
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-       kmem_cache_destroy(pool->handle_cachep);
+       if (pool->handle_cachep)
+               kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
index 0b38ee98024b724c15285b2baadf3e461145ceed..2e246a1a9b434ca91a74d391ae41fe7b8ff36938 100644 (file)
@@ -1164,6 +1164,9 @@ static void br_multicast_add_router(struct net_bridge *br,
        struct net_bridge_port *p;
        struct hlist_node *slot = NULL;
 
+       if (!hlist_unhashed(&port->rlist))
+               return;
+
        hlist_for_each_entry(p, &br->router_list, rlist) {
                if ((unsigned long) port >= (unsigned long) p)
                        break;
@@ -1191,12 +1194,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
        if (port->multicast_router != 1)
                return;
 
-       if (!hlist_unhashed(&port->rlist))
-               goto timer;
-
        br_multicast_add_router(br, port);
 
-timer:
        mod_timer(&port->multicast_router_timer,
                  now + br->multicast_querier_interval);
 }
index 77e22e4fc89826507b5f2ce197d62b4c1dff1b0e..476e5dda59e19822dba98a931369ff2666c59c0d 100644 (file)
@@ -299,8 +299,8 @@ mpls:
                if (!hdr)
                        return false;
 
-               if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) ==
-                    MPLS_LABEL_ENTROPY) {
+               if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
+                    MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
                        if (skb_flow_dissector_uses_key(flow_dissector,
                                                        FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
                                key_keyid = skb_flow_dissector_target(flow_dissector,
@@ -327,6 +327,7 @@ mpls:
                return false;
        }
 
+ip_proto_again:
        switch (ip_proto) {
        case IPPROTO_GRE: {
                struct gre_hdr {
@@ -383,6 +384,24 @@ mpls:
                }
                goto again;
        }
+       case NEXTHDR_HOP:
+       case NEXTHDR_ROUTING:
+       case NEXTHDR_DEST: {
+               u8 _opthdr[2], *opthdr;
+
+               if (proto != htons(ETH_P_IPV6))
+                       break;
+
+               opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
+                                             data, hlen, &_opthdr);
+               if (!opthdr)
+                       return false;
+
+               ip_proto = opthdr[0];
+               nhoff += (opthdr[1] + 1) << 3;
+
+               goto ip_proto_again;
+       }
        case IPPROTO_IPIP:
                proto = htons(ETH_P_IP);
                goto ip;
index 9bac0e6f8dfa22c71666c0ba5da5bb3d1a014251..b6a19ca0f99e49c7406f1fedb6c59433bbd7fd38 100644 (file)
@@ -4467,7 +4467,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
 
                while (order) {
                        if (npages >= 1 << order) {
-                               page = alloc_pages(gfp_mask |
+                               page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
                                                   __GFP_COMP |
                                                   __GFP_NOWARN |
                                                   __GFP_NORETRY,
index e72633c346b197b9dd6c94800fa0c55110aedfb2..7063c329c1b60f4b881e38b553ff7da0539a93fd 100644 (file)
@@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk)
 
        /*
         * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
-        * progress of swapping. However, if SOCK_MEMALLOC is cleared while
-        * it has rmem allocations there is a risk that the user of the
-        * socket cannot make forward progress due to exceeding the rmem
-        * limits. By rights, sk_clear_memalloc() should only be called
-        * on sockets being torn down but warn and reset the accounting if
-        * that assumption breaks.
+        * progress of swapping. SOCK_MEMALLOC may be cleared while
+        * it has rmem allocations due to the last swapfile being deactivated
+        * but there is a risk that the socket is unusable due to exceeding
+        * the rmem limits. Reclaim the reserves and obey rmem limits again.
         */
-       if (WARN_ON(sk->sk_forward_alloc))
-               sk_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 }
 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
 
@@ -1872,7 +1869,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 
        pfrag->offset = 0;
        if (SKB_FRAG_PAGE_ORDER) {
-               pfrag->page = alloc_pages(gfp | __GFP_COMP |
+               pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
                                          __GFP_NOWARN | __GFP_NORETRY,
                                          SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
index 19d7e43b53704be956bc7ce2a034def74d73a14e..6bf89a6312bc1c71da41ad0a1ebdbf819504367d 100644 (file)
@@ -172,7 +172,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 
-static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
index a52ce2db53f2d3c8f1d032628226b4c257c31dc2..8c6fd3d5e40feeb3c0b422d0e697e1a674b4f576 100644 (file)
@@ -145,7 +145,7 @@ static void tcp_cdg_hystart_update(struct sock *sk)
                return;
 
        if (hystart_detect & HYSTART_ACK_TRAIN) {
-               u32 now_us = local_clock() / NSEC_PER_USEC;
+               u32 now_us = div_u64(local_clock(), NSEC_PER_USEC);
 
                if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
                        ca->last_ack = now_us;
index 41a73da371a9230511f8c3e58351b3cf0034d1dc..f2e464eba5efdb7b2a8abe3c2cecedae473777e7 100644 (file)
@@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
         */
 
        rcu_read_lock();
+resubmit:
        idev = ip6_dst_idev(skb_dst(skb));
        if (!pskb_pull(skb, skb_transport_offset(skb)))
                goto discard;
        nhoff = IP6CB(skb)->nhoff;
        nexthdr = skb_network_header(skb)[nhoff];
 
-resubmit:
        raw = raw6_local_deliver(skb, nexthdr);
        ipprot = rcu_dereference(inet6_protos[nexthdr]);
        if (ipprot) {
@@ -246,12 +246,10 @@ resubmit:
                        goto discard;
 
                ret = ipprot->handler(skb);
-               if (ret < 0) {
-                       nexthdr = -ret;
+               if (ret > 0)
                        goto resubmit;
-               } else if (ret == 0) {
+               else if (ret == 0)
                        IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-               }
        } else {
                if (!raw) {
                        if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
index 45a7176ed460681558808439f20e1622423f4c32..6748c4277affad71cd721e3a985af10c31c047ad 100644 (file)
@@ -120,7 +120,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
-       struct rt6_info *rt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
@@ -258,7 +257,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(sk, dst, NULL, NULL);
 
-       rt = (struct rt6_info *) dst;
        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
            ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
index bff427f31924706f6e4b8b5259ae64f1dd932f7f..1f93a5978f2ad43fc81a16427e34d07ca2c0f34e 100644 (file)
@@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_UNREGISTER:
                mpls_ifdown(dev);
                break;
+       case NETDEV_CHANGENAME:
+               mdev = mpls_dev_get(dev);
+               if (mdev) {
+                       int err;
+
+                       mpls_dev_sysctl_unregister(mdev);
+                       err = mpls_dev_sysctl_register(dev, mdev);
+                       if (err)
+                               return notifier_from_errno(err);
+               }
+               break;
        }
        return NOTIFY_OK;
 }
index fb7976aee61c84f38aecdc5c5f0d8be20e577fa9..4f15b7d730e13d6aaa58ba7a28262c9831afea95 100644 (file)
@@ -381,13 +381,14 @@ nomem:
 }
 
 
-/* Public interface to creat the association shared key.
+/* Public interface to create the association shared key.
  * See code above for the algorithm.
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
        struct sctp_auth_bytes  *secret;
        struct sctp_shared_key *ep_key;
+       struct sctp_chunk *chunk;
 
        /* If we don't support AUTH, or peer is not capable
         * we don't need to do anything.
@@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
        sctp_auth_key_put(asoc->asoc_shared_key);
        asoc->asoc_shared_key = secret;
 
+       /* Update send queue in case any chunk already in there now
+        * needs authenticating
+        */
+       list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) {
+               if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc))
+                       chunk->auth = 1;
+       }
+
        return 0;
 }
 
index 30ea82a9b0f13b44d34767241586a3d9648b906e..46b6ed534ef224e5e1049b34eb9ebe34c27b383d 100644 (file)
@@ -2140,11 +2140,17 @@ static void tipc_sk_timeout(unsigned long data)
        peer_node = tsk_peer_node(tsk);
 
        if (tsk->probing_state == TIPC_CONN_PROBING) {
-               /* Previous probe not answered -> self abort */
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
-                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0,
-                                     own_node, peer_node, tsk->portid,
-                                     peer_port, TIPC_ERR_NO_PORT);
+               if (!sock_owned_by_user(sk)) {
+                       sk->sk_socket->state = SS_DISCONNECTING;
+                       tsk->connected = 0;
+                       tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+                                             tsk_peer_port(tsk));
+                       sk->sk_state_change(sk);
+               } else {
+                       /* Try again later */
+                       sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
+               }
+
        } else {
                skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
                                      INT_H_SIZE, 0, peer_node, own_node,
index fff1bef6ed6d916f9019a63d708652f4ab07cddf..fd682832a0e3635d52c734871d5402d270336dc3 100644 (file)
@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
        wdev_unlock(wdev);
 
+       memset(&sinfo, 0, sizeof(sinfo));
+
        if (rdev_get_station(rdev, dev, bssid, &sinfo))
                return NULL;
 
index 89b1df4e72ab3423bce45011fb03f86c193f5ad4..c5ec977b9c3786097b214e1c835efd8fa337c173 100755 (executable)
@@ -3169,12 +3169,12 @@ sub process {
                }
 
 # check for global initialisers.
-               if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+               if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) {
                        if (ERROR("GLOBAL_INITIALISERS",
                                  "do not initialise globals to 0 or NULL\n" .
                                      $herecurr) &&
                            $fix) {
-                               $fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+                               $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/;
                        }
                }
 # check for static initialisers.