git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'pm-5.11-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 10 Feb 2021 20:03:35 +0000 (12:03 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 10 Feb 2021 20:03:35 +0000 (12:03 -0800)
Pull power management fixes from Rafael Wysocki:
 "Address a performance regression related to scale-invariance on x86
  that may prevent turbo CPU frequencies from being used in certain
  workloads on systems using acpi-cpufreq as the CPU performance scaling
  driver and schedutil as the scaling governor"

* tag 'pm-5.11-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: ACPI: Update arch scale-invariance max perf ratio if CPPC is not there
  cpufreq: ACPI: Extend frequency tables to cover boost frequencies

78 files changed:
.mailmap
Documentation/dev-tools/kasan.rst
MAINTAINERS
drivers/acpi/acpica/nsrepair2.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/idxd/device.c
drivers/dma/idxd/dma.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/ti/k3-udma.c
drivers/i3c/master/mipi-i3c-hci/core.c
drivers/net/dsa/ocelot/felix.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/enetc/enetc_hw.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_io.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipa/gsi.c
drivers/net/usb/qmi_wwan.c
drivers/net/wan/hdlc_x25.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/xen-netback/rx.c
fs/Kconfig
fs/nilfs2/file.c
fs/squashfs/block.c
fs/squashfs/export.c
fs/squashfs/id.c
fs/squashfs/squashfs_fs_sb.h
fs/squashfs/super.c
fs/squashfs/xattr.h
fs/squashfs/xattr_id.c
include/asm-generic/vmlinux.lds.h
include/linux/netdevice.h
include/linux/uio.h
include/net/switchdev.h
include/soc/mscc/ocelot.h
kernel/bpf/stackmap.c
kernel/bpf/verifier.c
kernel/trace/bpf_trace.c
kernel/trace/trace_events.c
lib/iov_iter.c
mm/kasan/hw_tags.c
mm/memcontrol.c
mm/mremap.c
mm/slub.c
net/bridge/br_mrp.c
net/bridge/br_mrp_switchdev.c
net/bridge/br_private_mrp.h
net/core/datagram.c
net/core/dev.c
net/dsa/dsa2.c
net/mac80211/Kconfig
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/xt_recent.c
net/qrtr/tun.c
net/rxrpc/call_object.c
net/sctp/proc.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/virtio_transport_common.c
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/txtimestamp.c
tools/testing/selftests/netfilter/nft_meta.sh
tools/testing/selftests/vm/run_vmtests [deleted file]
tools/testing/selftests/vm/run_vmtests.sh [new file with mode: 0755]

index d674968df008b6b242d84bed04a7ddbdac0ba278..7fdf87b24fe84f37c1f3eddf13341d2a199f80d9 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -37,6 +37,7 @@ Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
 Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
 Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
index 1651d961f06a67ab1d3fc5858023c761658c346c..a248ac3941bed10022329c14d15e29f157a0ec57 100644 (file)
@@ -163,8 +163,7 @@ particular KASAN features.
 - ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
 - ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
-  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
-  ``off``).
+  traces collection (default: ``on``).
 
 - ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
   report or also panic the kernel (default: ``report``).
index 667d03852191fbbcb6827eaa7bdbd9bbfc8c0a4f..64c7169db617604f0b6d0490a9ef0371f14dd8ca 100644 (file)
@@ -9559,7 +9559,7 @@ F:        Documentation/hwmon/k8temp.rst
 F:     drivers/hwmon/k8temp.c
 
 KASAN
-M:     Andrey Ryabinin <aryabinin@virtuozzo.com>
+M:     Andrey Ryabinin <ryabinin.a.a@gmail.com>
 R:     Alexander Potapenko <glider@google.com>
 R:     Dmitry Vyukov <dvyukov@google.com>
 L:     kasan-dev@googlegroups.com
index d2c8d8279e7a2b45c9db23f63181d5f4e8bd0d07..24c197d91f2920a29f260885e63f66f9875373de 100644 (file)
@@ -495,8 +495,9 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
                   union acpi_operand_object **return_object_ptr)
 {
        union acpi_operand_object *return_object = *return_object_ptr;
-       char *dest;
+       union acpi_operand_object *new_string;
        char *source;
+       char *dest;
 
        ACPI_FUNCTION_NAME(ns_repair_HID);
 
@@ -517,6 +518,13 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
                return_ACPI_STATUS(AE_OK);
        }
 
+       /* It is simplest to always create a new string object */
+
+       new_string = acpi_ut_create_string_object(return_object->string.length);
+       if (!new_string) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
        /*
         * Remove a leading asterisk if present. For some unknown reason, there
         * are many machines in the field that contain IDs like this.
@@ -526,7 +534,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
        source = return_object->string.pointer;
        if (*source == '*') {
                source++;
-               return_object->string.length--;
+               new_string->string.length--;
 
                ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
                                  "%s: Removed invalid leading asterisk\n",
@@ -541,11 +549,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
         * "NNNN####" where N is an uppercase letter or decimal digit, and
         * # is a hex digit.
         */
-       for (dest = return_object->string.pointer; *source; dest++, source++) {
+       for (dest = new_string->string.pointer; *source; dest++, source++) {
                *dest = (char)toupper((int)*source);
        }
-       return_object->string.pointer[return_object->string.length] = 0;
 
+       acpi_ut_remove_reference(return_object);
+       *return_object_ptr = new_string;
        return_ACPI_STATUS(AE_OK);
 }
 
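As a side note, the repair above can be pictured with a minimal user-space sketch: strip the invalid leading '*' and uppercase the remainder into a freshly allocated string, instead of shortening the source object in place. Names here are illustrative, not the ACPICA API.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the _HID repair: always build a new string, so the source
 * object is never mutated while it is still being read. */
static char *repair_hid(const char *src)
{
        size_t len = strlen(src);
        char *dst, *out;

        if (*src == '*') {              /* drop the invalid leading asterisk */
                src++;
                len--;
        }

        out = dst = malloc(len + 1);
        if (!out)
                return NULL;

        for (; *src; src++, dst++)
                *dst = (char)toupper((unsigned char)*src);
        *dst = '\0';

        return out;
}

int main(void)
{
        char *hid = repair_hid("*pnp0c0a");

        printf("%s\n", hid ? hid : "(alloc failed)");   /* PNP0C0A */
        free(hid);
        return 0;
}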
index 962cbb5e5f7fcb630806ec7abb75be479ffa9899..fe6a460c4373503f0f4df4124d20e1a5a094db2b 100644 (file)
@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
                  "%s called while %d clients hold a reference\n",
                  __func__, chan->client_count);
        mutex_lock(&dma_list_mutex);
-       list_del(&chan->device_node);
        device->chancnt--;
        chan->dev->chan = NULL;
        mutex_unlock(&dma_list_mutex);
index 19a23767533ac90c690055697949de2873db6153..7ab83fe601ede6ebdd4467036c0437cae6cf2454 100644 (file)
@@ -982,11 +982,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
        dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-       pm_runtime_get_sync(dw->dma.dev);
-
        /* ASSERT:  channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
-               pm_runtime_put_sync_suspend(dw->dma.dev);
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }
@@ -1003,7 +1000,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
         * We need controller-specific data to set up slave transfers.
         */
        if (chan->private && !dw_dma_filter(chan, chan->private)) {
-               pm_runtime_put_sync_suspend(dw->dma.dev);
                dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
                return -EINVAL;
        }
@@ -1047,8 +1043,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        if (!dw->in_use)
                do_dw_dma_off(dw);
 
-       pm_runtime_put_sync_suspend(dw->dma.dev);
-
        dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
index 95f94a3ed6beb44f466fb511f3a78332a5903687..84a6ea60ecf0bddd6107312fc69bff2a19e2c28a 100644 (file)
@@ -398,17 +398,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
        return false;
 }
 
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+       union gensts_reg gensts;
+
+       gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+       return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
 /*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
  * all commands will be done via interrupt completion.
  */
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
        union idxd_command_reg cmd;
        unsigned long flags;
 
+       if (idxd_device_is_halted(idxd)) {
+               dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+               return -ENXIO;
+       }
+
        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = IDXD_CMD_RESET_DEVICE;
        dev_dbg(dev, "%s: sending reset for init.\n", __func__);
@@ -419,6 +433,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
               IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       return 0;
 }
 
 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
@@ -428,6 +443,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
        DECLARE_COMPLETION_ONSTACK(done);
        unsigned long flags;
 
+       if (idxd_device_is_halted(idxd)) {
+               dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+               *status = IDXD_CMDSTS_HW_ERR;
+               return;
+       }
+
        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
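The guard added above follows a common pattern: read the device's state register and refuse to issue further commands once it reports HALT. A compact sketch of that pattern, with the register layout and names invented for illustration:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEV_STATE_HALT 0x3              /* hypothetical state encoding */

/* Stand-in for ioread32() on a general-status register. */
static uint32_t read_gensts(void)
{
        return DEV_STATE_HALT;          /* pretend the device has halted */
}

static bool device_is_halted(void)
{
        return (read_gensts() & 0x3) == DEV_STATE_HALT;
}

static int device_reset(void)
{
        if (device_is_halted()) {
                fprintf(stderr, "device is HALTED, refusing reset\n");
                return -ENXIO;          /* the caller aborts the probe */
        }
        /* ... otherwise issue the reset command and poll for completion ... */
        return 0;
}

int main(void)
{
        return device_reset() ? 1 : 0;
}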
index 8ed2773d82859867c0d0f9b91ffc6cb38b226ee5..71fd6e4c42cd7456cd61a81fd1198a0200d92752 100644 (file)
@@ -205,5 +205,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
 
 void idxd_unregister_dma_channel(struct idxd_wq *wq)
 {
-       dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+       struct dma_chan *chan = &wq->dma_chan;
+
+       dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+       list_del(&chan->device_node);
 }
index 5a50e91c71bf01fcfee884d8244be1fb6681dcc3..81a0e65fd316d7af96b316af82e4d8f029dfdf57 100644 (file)
@@ -326,7 +326,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
 int idxd_device_enable(struct idxd_device *idxd);
 int idxd_device_disable(struct idxd_device *idxd);
 void idxd_device_reset(struct idxd_device *idxd);
index 2c051e07c34c2410458e2590ead84dbdc9f72201..fa04acd5582a0a13b09e37933166eba68b35662b 100644 (file)
@@ -335,7 +335,10 @@ static int idxd_probe(struct idxd_device *idxd)
        int rc;
 
        dev_dbg(dev, "%s entered and resetting device\n", __func__);
-       idxd_device_init_reset(idxd);
+       rc = idxd_device_init_reset(idxd);
+       if (rc < 0)
+               return rc;
+
        dev_dbg(dev, "IDXD reset complete\n");
 
        if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
index 593a2f6ed16cb64e8bdf6dd5eaf5c92b596be984..a60ca11a5784a9c5dd0d55e4145868c13845d3a8 100644 (file)
@@ -111,19 +111,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
        return IRQ_WAKE_THREAD;
 }
 
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 {
-       struct idxd_irq_entry *irq_entry = data;
-       struct idxd_device *idxd = irq_entry->idxd;
        struct device *dev = &idxd->pdev->dev;
        union gensts_reg gensts;
-       u32 cause, val = 0;
+       u32 val = 0;
        int i;
        bool err = false;
 
-       cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-       iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
        if (cause & IDXD_INTC_ERR) {
                spin_lock_bh(&idxd->dev_lock);
                for (i = 0; i < 4; i++)
@@ -181,7 +176,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
                              val);
 
        if (!err)
-               goto out;
+               return 0;
 
        /*
         * This case should rarely happen and typically is due to software
@@ -211,37 +206,58 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
                                gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
                                "FLR" : "system reset");
                        spin_unlock_bh(&idxd->dev_lock);
+                       return -ENXIO;
                }
        }
 
- out:
+       return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       struct idxd_device *idxd = irq_entry->idxd;
+       int rc;
+       u32 cause;
+
+       cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+       if (cause)
+               iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+       while (cause) {
+               rc = process_misc_interrupts(idxd, cause);
+               if (rc < 0)
+                       break;
+               cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+               if (cause)
+                       iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+       }
+
        idxd_unmask_msix_vector(idxd, irq_entry->id);
        return IRQ_HANDLED;
 }
 
-static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
 {
        /*
         * Completion address can be bad as well. Check fault address match for descriptor
         * and completion address.
         */
-       if ((u64)desc->hw == fault_addr ||
-           (u64)desc->completion == fault_addr) {
-               idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+       if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
+               struct idxd_device *idxd = desc->wq->idxd;
+               struct device *dev = &idxd->pdev->dev;
+
+               dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
                return true;
        }
 
        return false;
 }
 
-static bool complete_desc(struct idxd_desc *desc)
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
 {
-       if (desc->completion->status) {
-               idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
-               return true;
-       }
-
-       return false;
+       idxd_dma_complete_txd(desc, reason);
+       idxd_free_desc(desc->wq, desc);
 }
 
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
@@ -251,25 +267,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
        struct idxd_desc *desc, *t;
        struct llist_node *head;
        int queued = 0;
-       bool completed = false;
        unsigned long flags;
+       enum idxd_complete_type reason;
 
        *processed = 0;
        head = llist_del_all(&irq_entry->pending_llist);
        if (!head)
                goto out;
 
-       llist_for_each_entry_safe(desc, t, head, llnode) {
-               if (wtype == IRQ_WORK_NORMAL)
-                       completed = complete_desc(desc);
-               else if (wtype == IRQ_WORK_PROCESS_FAULT)
-                       completed = process_fault(desc, data);
+       if (wtype == IRQ_WORK_NORMAL)
+               reason = IDXD_COMPLETE_NORMAL;
+       else
+               reason = IDXD_COMPLETE_DEV_FAIL;
 
-               if (completed) {
-                       idxd_free_desc(desc->wq, desc);
+       llist_for_each_entry_safe(desc, t, head, llnode) {
+               if (desc->completion->status) {
+                       if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+                               match_fault(desc, data);
+                       complete_desc(desc, reason);
                        (*processed)++;
-                       if (wtype == IRQ_WORK_PROCESS_FAULT)
-                               break;
                } else {
                        spin_lock_irqsave(&irq_entry->list_lock, flags);
                        list_add_tail(&desc->list,
@@ -287,42 +303,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
                                 enum irq_work_type wtype,
                                 int *processed, u64 data)
 {
-       struct list_head *node, *next;
        int queued = 0;
-       bool completed = false;
        unsigned long flags;
+       LIST_HEAD(flist);
+       struct idxd_desc *desc, *n;
+       enum idxd_complete_type reason;
 
        *processed = 0;
-       spin_lock_irqsave(&irq_entry->list_lock, flags);
-       if (list_empty(&irq_entry->work_list))
-               goto out;
-
-       list_for_each_safe(node, next, &irq_entry->work_list) {
-               struct idxd_desc *desc =
-                       container_of(node, struct idxd_desc, list);
+       if (wtype == IRQ_WORK_NORMAL)
+               reason = IDXD_COMPLETE_NORMAL;
+       else
+               reason = IDXD_COMPLETE_DEV_FAIL;
 
+       /*
+        * This lock protects the list from corruption by accesses outside
+        * of the irq handler thread.
+        */
+       spin_lock_irqsave(&irq_entry->list_lock, flags);
+       if (list_empty(&irq_entry->work_list)) {
                spin_unlock_irqrestore(&irq_entry->list_lock, flags);
-               if (wtype == IRQ_WORK_NORMAL)
-                       completed = complete_desc(desc);
-               else if (wtype == IRQ_WORK_PROCESS_FAULT)
-                       completed = process_fault(desc, data);
+               return 0;
+       }
 
-               if (completed) {
-                       spin_lock_irqsave(&irq_entry->list_lock, flags);
+       list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
+               if (desc->completion->status) {
                        list_del(&desc->list);
-                       spin_unlock_irqrestore(&irq_entry->list_lock, flags);
-                       idxd_free_desc(desc->wq, desc);
                        (*processed)++;
-                       if (wtype == IRQ_WORK_PROCESS_FAULT)
-                               return queued;
+                       list_add_tail(&desc->list, &flist);
                } else {
                        queued++;
                }
-               spin_lock_irqsave(&irq_entry->list_lock, flags);
        }
 
- out:
        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+
+       list_for_each_entry(desc, &flist, list) {
+               if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+                       match_fault(desc, data);
+               complete_desc(desc, reason);
+       }
+
        return queued;
 }
 
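For readers following the idxd rework: the new handler drains a write-to-clear cause register, acknowledging each batch of bits before processing it and re-reading until the register comes back zero, so a cause latched mid-processing is not lost. A minimal user-space sketch of that drain loop, with the register simulated:

#include <stdint.h>
#include <stdio.h>

/* Simulated write-to-clear interrupt-cause register: reads return the
 * pending bits, writing a bit back acknowledges (clears) it. */
static uint32_t intcause = 0x5;         /* two causes pending initially */

static uint32_t cause_read(void)  { return intcause; }
static void cause_ack(uint32_t c) { intcause &= ~c; }

static int process(uint32_t cause)
{
        printf("handling cause bits 0x%x\n", cause);
        return 0;                       /* <0 would mean "halted, stop" */
}

int main(void)
{
        uint32_t cause = cause_read();

        if (cause)
                cause_ack(cause);

        /* Loop until no new cause bits latched while we were processing. */
        while (cause) {
                if (process(cause) < 0)
                        break;
                cause = cause_read();
                if (cause)
                        cause_ack(cause);
        }
        return 0;
}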
index 298460438bb4d86ec1e2df62bc8b960b8fbf31c9..f474a1232335450df6b4410c8cb298432a61f1ee 100644 (file)
@@ -2401,7 +2401,8 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
                        dev_err(ud->ddev.dev,
                                "Descriptor pool allocation failed\n");
                        uc->use_dma_pool = false;
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_res_free;
                }
 
                uc->use_dma_pool = true;
index 500abd27fb225e5e9e794b06277e1eb1aa998dae..1b73647cc3b1a658ab94804bdbf6b35aed310a18 100644 (file)
@@ -777,7 +777,7 @@ static int i3c_hci_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct __maybe_unused of_device_id i3c_hci_of_match[] = {
+static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
        { .compatible = "mipi-i3c-hci", },
        {},
 };
index 7dc230677b78e2d6876e286cf51bfc2bd3540de2..45fdb1256dbfeb6160e1e3f6aafddc657677b786 100644 (file)
@@ -233,9 +233,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
 {
        struct ocelot *ocelot = ds->priv;
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       int err;
+
+       ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
+                        DEV_MAC_ENA_CFG);
 
-       ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
        ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+
+       err = ocelot_port_flush(ocelot, port);
+       if (err)
+               dev_err(ocelot->dev, "failed to flush port %d: %d\n",
+                       port, err);
+
+       /* Put the port in reset. */
+       ocelot_port_writel(ocelot_port,
+                          DEV_CLOCK_CFG_MAC_TX_RST |
+                          DEV_CLOCK_CFG_MAC_RX_RST |
+                          DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
+                          DEV_CLOCK_CFG);
 }
 
 static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
index 06596fa1f9fea3dc6c6ff4e3820f5a7d88c24259..a0596c073dddc10d03f0625b58c24f2edbd91b50 100644 (file)
@@ -404,6 +404,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
                if (unlikely(!xdpf)) {
                        trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
                        xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+                       verdict = XDP_ABORTED;
                        break;
                }
 
@@ -424,7 +425,10 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
                        xdp_stat = &rx_ring->rx_stats.xdp_redirect;
                        break;
                }
-               fallthrough;
+               trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+               xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+               verdict = XDP_ABORTED;
+               break;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
                xdp_stat = &rx_ring->rx_stats.xdp_aborted;
index 0c5373462cedb73d93b0a23faa7d15a87db35d5b..0b1b5f9c67d47446455a994b3b09876e58396d46 100644 (file)
@@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
        CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
        CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
index 4360ce4d3fb6a1a9a4be2796a01c848f479251db..6faa20bed48858c7785934b0c78c6246d9699f04 100644 (file)
@@ -2180,8 +2180,10 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
                                struct xdp_frame **init_xdpf)
 {
        struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
-       void *new_buff;
+       void *new_buff, *aligned_data;
        struct page *p;
+       u32 data_shift;
+       int headroom;
 
        /* Check the data alignment and make sure the headroom is large
         * enough to store the xdpf backpointer. Use an aligned headroom
@@ -2191,25 +2193,57 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
         * byte frame headroom. If the XDP program uses all of it, copy the
         * data to a new buffer and make room for storing the backpointer.
         */
-       if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
+       if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
            xdpf->headroom >= priv->tx_headroom) {
                xdpf->headroom = priv->tx_headroom;
                return 0;
        }
 
+       /* Try to move the data inside the buffer just enough to align it and
+        * store the xdpf backpointer. If the available headroom isn't large
+        * enough, resort to allocating a new buffer and copying the data.
+        */
+       aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
+       data_shift = xdpf->data - aligned_data;
+
+       /* The XDP frame's headroom needs to be large enough to accommodate
+        * shifting the data as well as storing the xdpf backpointer.
+        */
+       if (xdpf->headroom >= data_shift + priv->tx_headroom) {
+               memmove(aligned_data, xdpf->data, xdpf->len);
+               xdpf->data = aligned_data;
+               xdpf->headroom = priv->tx_headroom;
+               return 0;
+       }
+
+       /* The new xdp_frame is stored in the new buffer. Reserve enough space
+        * in the headroom for storing it along with the driver's private
+        * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
+        * guarantee the data's alignment in the buffer.
+        */
+       headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
+                        DPAA_FD_DATA_ALIGNMENT);
+
+       /* Assure the extended headroom and data don't overflow the buffer,
+        * while maintaining the mandatory tailroom.
+        */
+       if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
+                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+               return -ENOMEM;
+
        p = dev_alloc_pages(0);
        if (unlikely(!p))
                return -ENOMEM;
 
        /* Copy the data to the new buffer at a properly aligned offset */
        new_buff = page_address(p);
-       memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
+       memcpy(new_buff + headroom, xdpf->data, xdpf->len);
 
        /* Create an XDP frame around the new buffer in a similar fashion
         * to xdp_convert_buff_to_frame.
         */
        new_xdpf = new_buff;
-       new_xdpf->data = new_buff + priv->tx_headroom;
+       new_xdpf->data = new_buff + headroom;
        new_xdpf->len = xdpf->len;
        new_xdpf->headroom = priv->tx_headroom;
        new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
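The dpaa workaround above prefers realigning the frame in place: align the data pointer down to the boundary, memmove the payload there, and fall back to allocating a fresh buffer only when the headroom cannot absorb both the shift and the driver's reserved space. A user-space sketch of the in-place path, with 64 standing in for DPAA_FD_DATA_ALIGNMENT:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGNMENT 64UL

/* Align data down to ALIGNMENT inside buf if the headroom allows it.
 * Returns the (possibly moved) data pointer, or NULL when a new buffer
 * would be needed instead. */
static uint8_t *realign_in_place(uint8_t *buf, uint8_t *data, size_t len,
                                 size_t needed_headroom)
{
        uint8_t *aligned = (uint8_t *)((uintptr_t)data & ~(ALIGNMENT - 1));
        size_t shift = (size_t)(data - aligned);
        size_t headroom = (size_t)(data - buf);

        if (shift == 0 && headroom >= needed_headroom)
                return data;                    /* already aligned */

        if (headroom < shift + needed_headroom)
                return NULL;                    /* must copy to a new buffer */

        memmove(aligned, data, len);            /* shift the payload down */
        return aligned;
}

int main(void)
{
        static uint8_t buf[4096];
        uint8_t *data = buf + 200;              /* unaligned, ample headroom */
        uint8_t *p;

        memcpy(data, "payload", 8);
        p = realign_in_place(buf, data, 8, 64);
        printf("moved to offset %td: %s\n", p - buf, p);        /* 192 */
        return 0;
}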
index e1e950d48c92baf3a88c56c3ecff66486d1e471b..c71fe8d751d50342a9ad2d764a4afe01ba1aaf18 100644 (file)
@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_CBS_BW_MASK      GENMASK(6, 0)
 #define ENETC_PTCCBSR1(n)      (0x1114 + (n) * 8) /* n = 0 to 7*/
 #define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR         0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val)        (BIT((val) & 0xf) * 32)
 #define ENETC_PRSSK(n)         (0x1410 + (n) * 4) /* n = [0..9] */
 #define ENETC_PSIVLANFMR       0x1700
 #define ENETC_PSIVLANFMR_VS    BIT(0)
index ed8fcb8b486ebcaf437cb4a1d03a02461c8c828c..3eb5f1375bd4c4bf5b623569700eda09f8084f1b 100644 (file)
@@ -996,6 +996,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
                phylink_destroy(priv->phylink);
 }
 
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+       struct enetc_cmd_rfse rfse = {0};
+       struct enetc_hw *hw = &si->hw;
+       int num_rfs, i, err = 0;
+       u32 val;
+
+       val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+       num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+       for (i = 0; i < num_rfs; i++) {
+               err = enetc_set_fs_entry(si, &rfse, i);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+       struct enetc_hw *hw = &si->hw;
+       int num_rss, err;
+       int *rss_table;
+       u32 val;
+
+       val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+       num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+       if (!num_rss)
+               return 0;
+
+       rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+       if (!rss_table)
+               return -ENOMEM;
+
+       err = enetc_set_rss_table(si, rss_table, num_rss);
+
+       kfree(rss_table);
+
+       return err;
+}
+
 static int enetc_pf_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent)
 {
@@ -1051,6 +1096,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
                goto err_alloc_si_res;
        }
 
+       err = enetc_init_port_rfs_memory(si);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+               goto err_init_port_rfs;
+       }
+
+       err = enetc_init_port_rss_memory(si);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+               goto err_init_port_rss;
+       }
+
        err = enetc_alloc_msix(priv);
        if (err) {
                dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1079,6 +1136,8 @@ err_phylink_create:
        enetc_mdiobus_destroy(pf);
 err_mdiobus_create:
        enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
 err_alloc_msix:
        enetc_free_si_resources(priv);
 err_alloc_si_res:
index c242883fea5db8af20ac10d629b2d7350990c33a..48549db23c5241e6bcd04d3584c0b837c6f981a0 100644 (file)
@@ -9813,12 +9813,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
 {
+       struct hnae3_handle *handle = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        int reset_try_times = 0;
        int reset_status;
        u16 queue_gid;
        int ret;
 
+       if (queue_id >= handle->kinfo.num_tqps) {
+               dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+                        queue_id);
+               return;
+       }
+
        queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
 
        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
index 754c09ada9019ff8c3ea247a9d2a23662be575d0..ffb416e088a978800a9582abd80ab5c77cec4fad 100644 (file)
@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
                        struct hclge_vport *vport)
 {
        struct hnae3_ring_chain_node *cur_chain, *new_chain;
+       struct hclge_dev *hdev = vport->back;
        int ring_num;
-       int i = 0;
+       int i;
 
        ring_num = req->msg.ring_num;
 
        if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
                return -ENOMEM;
 
+       for (i = 0; i < ring_num; i++) {
+               if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+                       dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+                               req->msg.param[i].tqp_index,
+                               vport->nic.kinfo.rss_size - 1);
+                       return -EINVAL;
+               }
+       }
+
        hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
-                     req->msg.param[i].ring_type);
+                     req->msg.param[0].ring_type);
        ring_chain->tqp_index =
                hclge_get_queue_id(vport->nic.kinfo.tqp
-                                  [req->msg.param[i].tqp_index]);
+                                  [req->msg.param[0].tqp_index]);
        hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                       HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+                       HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
 
        cur_chain = ring_chain;
 
@@ -597,6 +607,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
 
        index = mbx_req->msg.data[0];
 
+       /* Check the query index of rss_hash_key from VF, make sure it is
+        * not beyond the size of rss_hash_key.
+        */
+       if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+             sizeof(vport[0].rss_hash_key)) {
+               dev_warn(&hdev->pdev->dev,
+                        "failed to get the rss hash key, the index(%u) invalid !\n",
+                        index);
+               return;
+       }
+
        memcpy(resp_msg->data,
               &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
               HCLGE_RSS_MBX_RESP_LEN);
index f79034c786c847427fbf8bc9ad6921049a9d84e7..a536fdbf05e196b58603c47c2c7de7c1a2afb9fe 100644 (file)
@@ -4918,7 +4918,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                                complete(&adapter->init_done);
                                adapter->init_done_rc = -EIO;
                        }
-                       ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+                       rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+                       if (rc && rc != -EBUSY) {
+                               /* We were unable to schedule the failover
+                                * reset either because the adapter was still
+                                * probing (eg: during kexec) or we could not
+                                * allocate memory. Clear the failover_pending
+                                * flag since no one else will. We ignore
+                                * EBUSY because it means either FAILOVER reset
+                                * is already scheduled or the adapter is
+                                * being removed.
+                                */
+                               netdev_err(netdev,
+                                          "Error %ld scheduling failover reset\n",
+                                          rc);
+                               adapter->failover_pending = false;
+                       }
                        break;
                case IBMVNIC_CRQ_INIT_COMPLETE:
                        dev_info(dev, "Partner initialization complete\n");
index ff87a0bc089cffc6cbe71c34ea88d86132ae1d53..c072eb5c07646b66db85d8392177074c0f9eac3c 100644 (file)
@@ -375,6 +375,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
        }
 }
 
+static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+{
+       return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
+}
+
+int ocelot_port_flush(struct ocelot *ocelot, int port)
+{
+       int err, val;
+
+       /* Disable dequeuing from the egress queues */
+       ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
+                      QSYS_PORT_MODE_DEQUEUE_DIS,
+                      QSYS_PORT_MODE, port);
+
+       /* Disable flow control */
+       ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+
+       /* Disable priority flow control */
+       ocelot_fields_write(ocelot, port,
+                           QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
+
+       /* Wait at least the time it takes to receive a frame of maximum length
+        * at the port.
+        * Worst-case delays for 10 kilobyte jumbo frames are:
+        * 8 ms on a 10M port
+        * 800 μs on a 100M port
+        * 80 μs on a 1G port
+        * 32 μs on a 2.5G port
+        */
+       usleep_range(8000, 10000);
+
+       /* Disable half duplex backpressure. */
+       ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
+                      SYS_FRONT_PORT_MODE, port);
+
+       /* Flush the queues associated with the port. */
+       ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
+                      REW_PORT_CFG, port);
+
+       /* Enable dequeuing from the egress queues. */
+       ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
+                      port);
+
+       /* Wait until flushing is complete. */
+       err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
+                               100, 2000000, false, ocelot, port);
+
+       /* Clear flushing again. */
+       ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+
+       return err;
+}
+EXPORT_SYMBOL(ocelot_port_flush);
+
 void ocelot_adjust_link(struct ocelot *ocelot, int port,
                        struct phy_device *phydev)
 {
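ocelot_port_flush() above leans on read_poll_timeout() to wait for the egress queues to drain. A minimal sketch of that poll-with-timeout idiom, with the hardware register simulated (the kernel helper is more general, taking the read op and its arguments as macro parameters):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Simulated "bytes still queued on the port" register. */
static uint32_t read_eq_avail(void)
{
        static int reads;
        return reads++ < 3 ? 1024 : 0;          /* drains after a few polls */
}

/* Poll every sleep_us until the queues are empty or timeout_us elapses,
 * mirroring the shape of read_poll_timeout(..., val, !val, 100, 2000000). */
static int poll_until_empty(long sleep_us, long timeout_us)
{
        struct timespec ts = { 0, sleep_us * 1000L };
        long waited = 0;

        for (;;) {
                if (read_eq_avail() == 0)
                        return 0;
                if (waited >= timeout_us)
                        return -ETIMEDOUT;
                nanosleep(&ts, NULL);
                waited += sleep_us;
        }
}

int main(void)
{
        int err = poll_until_empty(100, 2000000);

        printf("flush %s\n", err ? "timed out" : "complete");
        return err ? 1 : 0;
}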
index 0acb459484185a1b238e4f5f6332b118c9c2419c..ea4e83410fe4d03a6fa15f770c0dab9f399271b9 100644 (file)
@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 }
 EXPORT_SYMBOL(ocelot_port_writel);
 
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+{
+       u32 cur = ocelot_port_readl(port, reg);
+
+       ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+}
+EXPORT_SYMBOL(ocelot_port_rmwl);
+
 u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
                            u32 reg, u32 offset)
 {
index 8ed3b2c834a09e44cce6f4810a5484c3cd5d7943..56985542e2029701e0977415bfcc83399361f6f4 100644 (file)
@@ -324,7 +324,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
 
                priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
        } else if (!qopt->enable) {
-               return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+               ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+                                      MTL_QUEUE_DCB);
+               if (ret)
+                       return ret;
+
+               priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
        }
 
        /* Port Transmit Rate and Speed Divider */
index 2350342b961fff457b1f682181eb38bca1cb0ddb..13bd48a75db7692fb5ab221ea587d60c088e325f 100644 (file)
@@ -1262,8 +1262,11 @@ static int netvsc_receive(struct net_device *ndev,
                ret = rndis_filter_receive(ndev, net_device,
                                           nvchan, data, buflen);
 
-               if (unlikely(ret != NVSP_STAT_SUCCESS))
+               if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+                       /* Drop incomplete packet */
+                       nvchan->rsc.cnt = 0;
                        status = NVSP_STAT_FAIL;
+               }
        }
 
        enq_receive_complete(ndev, net_device, q_idx,
index 598713c0d5a8753f1692a82d7f68eb0b83e19375..3aab2b867fc0d082374683800e55b9a5ce418872 100644 (file)
@@ -509,8 +509,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
        return ret;
 
 drop:
-       /* Drop incomplete packet */
-       nvchan->rsc.cnt = 0;
        return NVSP_STAT_FAIL;
 }
 
index 34e5f2155d6206463523624b12c8091839ebba1d..b77f5fef7aecab8325ddd29304500cd0b23fc531 100644 (file)
@@ -1710,6 +1710,7 @@ static int gsi_channel_setup(struct gsi *gsi)
                if (!channel->gsi)
                        continue;       /* Ignore uninitialized channels */
 
+               ret = -EINVAL;
                dev_err(gsi->dev, "channel %u not supported by hardware\n",
                        channel_id - 1);
                channel_id = gsi->channel_count;
index cc4819282820bf575ae8c8d2f364e8e1c7d898b9..5a05add9b4e690e10c5027e10d59bd21f867ef91 100644 (file)
@@ -1309,6 +1309,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
        {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+       {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)},    /* Cinterion MV31 RmNet */
        {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index bb164805804e87e08a2f21c74f66eef371cbe64c..4aaa6388b9ee01f4da67f17206a2a37d7c255424 100644 (file)
@@ -169,11 +169,11 @@ static int x25_open(struct net_device *dev)
 
        result = lapb_register(dev, &cb);
        if (result != LAPB_OK)
-               return result;
+               return -ENOMEM;
 
        result = lapb_getparms(dev, &params);
        if (result != LAPB_OK)
-               return result;
+               return -EINVAL;
 
        if (state(hdlc)->settings.dce)
                params.mode = params.mode | LAPB_DCE;
@@ -188,7 +188,7 @@ static int x25_open(struct net_device *dev)
 
        result = lapb_setparms(dev, &params);
        if (result != LAPB_OK)
-               return result;
+               return -EINVAL;
 
        return 0;
 }
index a84bb9b6573f8d559ddf6cba5444c7fb295dfb41..e150d82eddb6c72859872abc7b3d7e73b7484e94 100644 (file)
@@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT
 config ATH9K
        tristate "Atheros 802.11n wireless cards support"
        depends on MAC80211 && HAS_DMA
+       select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select ATH9K_HW
        select ATH9K_COMMON
-       imply NEW_LEDS
-       imply LEDS_CLASS
-       imply MAC80211_LEDS
        help
          This module adds support for wireless adapters based on
          Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM
 config ATH9K_HTC
        tristate "Atheros HTC based wireless cards support"
        depends on USB && MAC80211
+       select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select ATH9K_HW
        select ATH9K_COMMON
-       imply NEW_LEDS
-       imply LEDS_CLASS
-       imply MAC80211_LEDS
        help
          Support for Atheros HTC based cards.
          Chipsets supported: AR9271
index 73eeb00d5aa640a4c7f1635384282b04578b8cb5..e81dfaf99bcbf41d461fa732ed55a5fa872262d2 100644 (file)
@@ -509,15 +509,17 @@ static void
 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
                  int len, bool more)
 {
-       struct page *page = virt_to_head_page(data);
-       int offset = data - page_address(page);
        struct sk_buff *skb = q->rx_head;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
 
        if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
-               offset += q->buf_offset;
+               struct page *page = virt_to_head_page(data);
+               int offset = data - page_address(page) + q->buf_offset;
+
                skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
                                q->buf_size);
+       } else {
+               skb_free_frag(data);
        }
 
        if (more)
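The mt76 fix above covers the case where the skb already holds the maximum number of fragments: the new buffer can no longer be attached, so it must be freed on the spot rather than leaked. A toy illustration of that take-or-free rule, with a plain fragment array standing in for skb_shared_info:

#include <stdio.h>
#include <stdlib.h>

#define MAX_FRAGS 4

struct rx_buf {
        void *frags[MAX_FRAGS];
        int nr_frags;
};

/* Attach data as a new fragment, or free it when the array is full --
 * every buffer must end up either owned or released. */
static void add_fragment(struct rx_buf *buf, void *data)
{
        if (buf->nr_frags < MAX_FRAGS)
                buf->frags[buf->nr_frags++] = data;
        else
                free(data);             /* this was the leak before the fix */
}

int main(void)
{
        struct rx_buf buf = { .nr_frags = 0 };
        int i;

        for (i = 0; i < 6; i++)
                add_fragment(&buf, malloc(64));
        printf("attached %d fragments\n", buf.nr_frags);        /* 4 */

        while (buf.nr_frags)
                free(buf.frags[--buf.nr_frags]);
        return 0;
}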
index b8febe1d1bfd3eaef08029eab9b4a228d5cb7542..accc991d153f7c787c1b83bedb14183d5019c1c4 100644 (file)
@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
        RING_IDX prod, cons;
        struct sk_buff *skb;
        int needed;
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
        skb = skb_peek(&queue->rx_queue);
-       if (!skb)
+       if (!skb) {
+               spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
                return false;
+       }
 
        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
        if (skb->sw_hash)
                needed++;
 
+       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;
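The xen-netback change extends the rx_queue lock across both the skb_peek() and the subsequent reads of skb fields, since a concurrent dequeue could otherwise free the skb between the two. A pthread sketch of the same peek-under-lock pattern, with an invented queue type:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {
        struct pkt *next;
        size_t len;
};

struct queue {
        pthread_mutex_t lock;
        struct pkt *head;
};

/* Peek at the head packet and derive the slot count from its fields
 * while still holding the lock -- dropping it between the peek and the
 * field reads would let a concurrent dequeue free the packet under us. */
static bool slots_needed(struct queue *q, size_t *needed)
{
        bool ok = false;

        pthread_mutex_lock(&q->lock);
        if (q->head) {
                *needed = (q->head->len + 4095) / 4096; /* pages needed */
                ok = true;
        }
        pthread_mutex_unlock(&q->lock);
        return ok;
}

int main(void)
{
        struct pkt p = { NULL, 10000 };
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, &p };
        size_t needed;

        if (slots_needed(&q, &needed))
                printf("need %zu slots\n", needed);     /* need 3 slots */
        return 0;
}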
index aa4c122823018b756cdc566c33e20dbf834dfb10..da524c4d7b7e03f7d9a218f9e78ef0849b16de2d 100644 (file)
@@ -203,7 +203,7 @@ config TMPFS_XATTR
 
 config TMPFS_INODE64
        bool "Use 64-bit ino_t by default in tmpfs"
-       depends on TMPFS && 64BIT
+       depends on TMPFS && 64BIT && !(S390 || ALPHA)
        default n
        help
          tmpfs has historically used only inode numbers as wide as an unsigned
index 64bc81363c6cc0437dd4c757615af395c85b0475..e1bd592ce7001a28a2c18fdb7335e4d0d0b8a8ed 100644 (file)
@@ -141,6 +141,7 @@ const struct file_operations nilfs_file_operations = {
        /* .release     = nilfs_release_file, */
        .fsync          = nilfs_sync_file,
        .splice_read    = generic_file_splice_read,
+       .splice_write   = iter_file_splice_write,
 };
 
 const struct inode_operations nilfs_file_inode_operations = {
index 8a19773b5a0b7f14b86f16e97643e7b4c76f941b..45f44425d85601f14b09c49c8bdbc36294d9449b 100644 (file)
@@ -196,9 +196,15 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
                length = SQUASHFS_COMPRESSED_SIZE(length);
                index += 2;
 
-               TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
+               TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
                      compressed ? "" : "un", length);
        }
+       if (length < 0 || length > output->length ||
+                       (index + length) > msblk->bytes_used) {
+               res = -EIO;
+               goto out;
+       }
+
        if (next_index)
                *next_index = index + length;
 
index ae2c87bb0fbec8d7a15cbaaf536d650abbdfd42e..eb02072d28dd6dc6c375facc3da051bddafac30a 100644 (file)
@@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
        int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
-       u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+       u64 start;
        __le64 ino;
        int err;
 
        TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
 
+       if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
+               return -EINVAL;
+
+       start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+
        err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
        if (err < 0)
                return err;
@@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
                u64 lookup_table_start, u64 next_table, unsigned int inodes)
 {
        unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+       unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
+       int n;
        __le64 *table;
+       u64 start, end;
 
        TRACE("In read_inode_lookup_table, length %d\n", length);
 
@@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
        if (inodes == 0)
                return ERR_PTR(-EINVAL);
 
-       /* length bytes should not extend into the next table - this check
-        * also traps instances where lookup_table_start is incorrectly larger
-        * than the next table start
+       /*
+        * The computed size of the lookup table (length bytes) should exactly
+        * match the table start and end points
         */
-       if (lookup_table_start + length > next_table)
+       if (length != (next_table - lookup_table_start))
                return ERR_PTR(-EINVAL);
 
        table = squashfs_read_table(sb, lookup_table_start, length);
+       if (IS_ERR(table))
+               return table;
 
        /*
-        * table[0] points to the first inode lookup table metadata block,
-        * this should be less than lookup_table_start
+        * table[0], table[1], ... table[indexes - 1] store the locations
+        * of the compressed inode lookup blocks.  Each entry should be
+        * less than the next (i.e. table[0] < table[1]), and the difference
+        * between them should be SQUASHFS_METADATA_SIZE or less.
+        * table[indexes - 1] should be less than lookup_table_start, and
+        * again the difference should be SQUASHFS_METADATA_SIZE or less
         */
-       if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
+       for (n = 0; n < (indexes - 1); n++) {
+               start = le64_to_cpu(table[n]);
+               end = le64_to_cpu(table[n + 1]);
+
+               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+                       kfree(table);
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
+       start = le64_to_cpu(table[indexes - 1]);
+       if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 6be5afe7287d63d80dc1cff0d668065e873525dc..11581bf31af41dcfd99293ef4a261d8e8440a331 100644 (file)
@@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int block = SQUASHFS_ID_BLOCK(index);
        int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
-       u64 start_block = le64_to_cpu(msblk->id_table[block]);
+       u64 start_block;
        __le32 disk_id;
        int err;
 
+       if (index >= msblk->ids)
+               return -EINVAL;
+
+       start_block = le64_to_cpu(msblk->id_table[block]);
+
        err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
                                                        sizeof(disk_id));
        if (err < 0)
@@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
                u64 id_table_start, u64 next_table, unsigned short no_ids)
 {
        unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+       unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
+       int n;
        __le64 *table;
+       u64 start, end;
 
        TRACE("In read_id_index_table, length %d\n", length);
 
@@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
                return ERR_PTR(-EINVAL);
 
        /*
-        * length bytes should not extend into the next table - this check
-        * also traps instances where id_table_start is incorrectly larger
-        * than the next table start
+        * The computed size of the index table (length bytes) should exactly
+        * match the table start and end points
         */
-       if (id_table_start + length > next_table)
+       if (length != (next_table - id_table_start))
                return ERR_PTR(-EINVAL);
 
        table = squashfs_read_table(sb, id_table_start, length);
+       if (IS_ERR(table))
+               return table;
 
        /*
-        * table[0] points to the first id lookup table metadata block, this
-        * should be less than id_table_start
+        * table[0], table[1], ... table[indexes - 1] store the locations
+        * of the compressed id blocks.   Each entry should be less than
+        * the next (i.e. table[0] < table[1]), and the difference between them
+        * should be SQUASHFS_METADATA_SIZE or less.  table[indexes - 1]
+        * should be less than id_table_start, and again the difference
+        * should be SQUASHFS_METADATA_SIZE or less
         */
-       if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
+       for (n = 0; n < (indexes - 1); n++) {
+               start = le64_to_cpu(table[n]);
+               end = le64_to_cpu(table[n + 1]);
+
+               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+                       kfree(table);
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
+       start = le64_to_cpu(table[indexes - 1]);
+       if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
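All three squashfs table fixes in this series apply the same sanity check to on-disk index tables: entries must be strictly increasing, neighbouring entries may be at most one metadata block apart, and the last entry must sit below the table's own start. A self-contained sketch of that validator, where 8192 matches SQUASHFS_METADATA_SIZE (the kernel additionally converts each entry with le64_to_cpu()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define METADATA_SIZE 8192

/* Validate table[0..indexes-1]: strictly increasing, gaps of at most
 * one metadata block, and the last entry below the table's own start. */
static bool index_table_valid(const uint64_t *table, int indexes,
                              uint64_t table_start)
{
        int n;

        for (n = 0; n < indexes - 1; n++) {
                if (table[n] >= table[n + 1] ||
                    table[n + 1] - table[n] > METADATA_SIZE)
                        return false;
        }

        return table[indexes - 1] < table_start &&
               table_start - table[indexes - 1] <= METADATA_SIZE;
}

int main(void)
{
        uint64_t good[] = { 0, 8192, 16384 };
        uint64_t bad[]  = { 0, 100000, 16384 };         /* gap too large */

        printf("good: %d, bad: %d\n",
               index_table_valid(good, 3, 24000),
               index_table_valid(bad, 3, 24000));       /* good: 1, bad: 0 */
        return 0;
}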
index 34c21ffb6df37385219df07ed1e24f5ddabf3311..166e98806265bf4e695668ebbb7a9658d53c2e53 100644 (file)
@@ -64,5 +64,6 @@ struct squashfs_sb_info {
        unsigned int                            inodes;
        unsigned int                            fragments;
        int                                     xattr_ids;
+       unsigned int                            ids;
 };
 #endif
index d6c6593ec169e724b84413a8f0274cca29994146..88cc94be10765c7f76b223536a8195eeb30f0296 100644 (file)
@@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
        msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
        msblk->inodes = le32_to_cpu(sblk->inodes);
        msblk->fragments = le32_to_cpu(sblk->fragments);
+       msblk->ids = le16_to_cpu(sblk->no_ids);
        flags = le16_to_cpu(sblk->flags);
 
        TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
        TRACE("Block size %d\n", msblk->block_size);
        TRACE("Number of inodes %d\n", msblk->inodes);
        TRACE("Number of fragments %d\n", msblk->fragments);
-       TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
+       TRACE("Number of ids %d\n", msblk->ids);
        TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
        TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
        TRACE("sblk->fragment_table_start %llx\n",
@@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 allocate_id_index_table:
        /* Allocate and read id index table */
        msblk->id_table = squashfs_read_id_index_table(sb,
-               le64_to_cpu(sblk->id_table_start), next_table,
-               le16_to_cpu(sblk->no_ids));
+               le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
        if (IS_ERR(msblk->id_table)) {
                errorf(fc, "unable to read id index table");
                err = PTR_ERR(msblk->id_table);
index 184129afd456654776e0dbf29b167b12648aa162..d8a270d3ac4cb267dbbbc4097c2b1e7034e66072 100644 (file)
@@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
 static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
                u64 start, u64 *xattr_table_start, int *xattr_ids)
 {
+       struct squashfs_xattr_id_table *id_table;
+
+       id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+       if (IS_ERR(id_table))
+               return (__le64 *) id_table;
+
+       *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+       kfree(id_table);
+
        ERROR("Xattrs in filesystem, these will be ignored\n");
-       *xattr_table_start = start;
        return ERR_PTR(-ENOTSUPP);
 }
 
index d99e08464554f4b9c329d64ae520d20a8b3e4728..ead66670b41a593a1e24607bfc7bb0dc8ebcef75 100644 (file)
@@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int block = SQUASHFS_XATTR_BLOCK(index);
        int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
-       u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+       u64 start_block;
        struct squashfs_xattr_id id;
        int err;
 
+       if (index >= msblk->xattr_ids)
+               return -EINVAL;
+
+       start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+
        err = squashfs_read_metadata(sb, &id, &start_block, &offset,
                                                        sizeof(id));
        if (err < 0)
@@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
 /*
  * Read uncompressed xattr id lookup table indexes from disk into memory
  */
-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
                u64 *xattr_table_start, int *xattr_ids)
 {
-       unsigned int len;
+       struct squashfs_sb_info *msblk = sb->s_fs_info;
+       unsigned int len, indexes;
        struct squashfs_xattr_id_table *id_table;
+       __le64 *table;
+       u64 start, end;
+       int n;
 
-       id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+       id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
        if (IS_ERR(id_table))
                return (__le64 *) id_table;
 
@@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
        if (*xattr_ids == 0)
                return ERR_PTR(-EINVAL);
 
-       /* xattr_table should be less than start */
-       if (*xattr_table_start >= start)
+       len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+       indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
+
+       /*
+        * The computed size of the index table (len bytes) should exactly
+        * match the table start and end points
+        */
+       start = table_start + sizeof(*id_table);
+       end = msblk->bytes_used;
+
+       if (len != (end - start))
                return ERR_PTR(-EINVAL);
 
-       len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+       table = squashfs_read_table(sb, start, len);
+       if (IS_ERR(table))
+               return table;
+
+       /* table[0], table[1], ... table[indexes - 1] store the locations
+        * of the compressed xattr id blocks.  Each entry should be less than
+        * the next (i.e. table[0] < table[1]), and the difference between them
+        * should be SQUASHFS_METADATA_SIZE or less.  table[indexes - 1]
+        * should be less than table_start, and again the difference
+        * should be SQUASHFS_METADATA_SIZE or less.
+        *
+        * Finally xattr_table_start should be less than table[0].
+        */
+       for (n = 0; n < (indexes - 1); n++) {
+               start = le64_to_cpu(table[n]);
+               end = le64_to_cpu(table[n + 1]);
+
+               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+                       kfree(table);
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
+       start = le64_to_cpu(table[indexes - 1]);
+       if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+               kfree(table);
+               return ERR_PTR(-EINVAL);
+       }
 
-       TRACE("In read_xattr_index_table, length %d\n", len);
+       if (*xattr_table_start >= le64_to_cpu(table[0])) {
+               kfree(table);
+               return ERR_PTR(-EINVAL);
+       }
 
-       return squashfs_read_table(sb, start + sizeof(*id_table), len);
+       return table;
 }
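
The checks above form a reusable pattern for any index table read from untrusted media: entries strictly increasing, consecutive gaps bounded by the metadata block size, and the final entry sitting just below the table's own start. A standalone sketch of the same validation (hypothetical helper, not the kernel function; assumes indexes >= 1):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define METADATA_SIZE 8192      /* stands in for SQUASHFS_METADATA_SIZE */

static bool index_table_valid(const uint64_t *table, size_t indexes,
                              uint64_t table_start)
{
        for (size_t n = 0; n + 1 < indexes; n++) {
                /* strictly increasing, with a bounded gap */
                if (table[n] >= table[n + 1] ||
                    table[n + 1] - table[n] > METADATA_SIZE)
                        return false;
        }
        /* the last block must lie just below the index table itself */
        return table[indexes - 1] < table_start &&
               table_start - table[indexes - 1] <= METADATA_SIZE;
}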
index b2b3d81b1535a5abbf6803f3dfbb29058a979bc4..b97c628ad91ff3c8b9f7c81f8bc0b6909bf35b74 100644 (file)
        }                                                               \
                                                                        \
        /* Built-in firmware blobs */                                   \
-       .builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {      \
+       .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) {    \
                __start_builtin_fw = .;                                 \
                KEEP(*(.builtin_fw))                                    \
                __end_builtin_fw = .;                                   \
index 259be67644e354e3994e423977c03654f0b22c3a..5ff27c12ce68833a2c142be425c82bffe8eecbd0 100644 (file)
@@ -4352,6 +4352,7 @@ static inline void netif_tx_disable(struct net_device *dev)
 
        local_bh_disable();
        cpu = smp_processor_id();
+       spin_lock(&dev->tx_global_lock);
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
@@ -4359,6 +4360,7 @@ static inline void netif_tx_disable(struct net_device *dev)
                netif_tx_stop_queue(txq);
                __netif_tx_unlock(txq);
        }
+       spin_unlock(&dev->tx_global_lock);
        local_bh_enable();
 }
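
The added lock pairs netif_tx_disable() with netif_tx_lock(), which takes dev->tx_global_lock before walking the per-queue locks; without the outer lock, a netif_tx_lock() holder such as the TX watchdog could observe some queues stopped and others still live halfway through the disable. The ordering, roughly (a sketch, not kernel code):

/*
 *   netif_tx_lock(dev)                  netif_tx_disable(dev)
 *   ------------------                  ---------------------
 *   spin_lock(&dev->tx_global_lock)     spin_lock(&dev->tx_global_lock)
 *     __netif_tx_lock() each queue        stop each queue under its lock
 *   spin_unlock(...)                    spin_unlock(&dev->tx_global_lock)
 *
 * Sharing the outer lock makes "stop all queues" atomic with respect
 * to any holder of the global TX lock.
 */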
 
index 72d88566694ee975e6ad69fbae8730e125e0dd56..27ff8eb786dc34817ac875f780b478c2b93a758f 100644 (file)
@@ -260,7 +260,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 {
        i->count = count;
 }
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+struct csum_state {
+       __wsum csum;
+       size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
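
struct csum_state exists because the Internet checksum is byte-position dependent: a chunk landing at an odd destination offset contributes its bytes swapped, and the pipe path previously always folded from offset 0, corrupting checksums when a datagram spanned calls. Carrying the running offset alongside the sum fixes that. A sketch of offset-aware folding, modeled on the kernel's csum_block_add() (simplified to plain uint32_t):

#include <stddef.h>
#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned int r)
{
        return (x >> r) | (x << (32 - r));
}

/* Fold a partial ones'-complement sum computed for bytes starting at
 * 'off' into the running sum; an odd offset swaps byte lanes, which
 * the rotate by 8 corrects. The final add keeps the end-around carry. */
static uint32_t block_add(uint32_t sum, uint32_t part, size_t off)
{
        if (off & 1)
                part = ror32(part, 8);
        sum += part;
        return sum + (sum < part);
}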
index 99cd538d6519147df134940c2f1d28f4aea2d84d..afdf8bd1b4fe52f4be39a0a15d1b36e08a9d90fe 100644 (file)
@@ -42,7 +42,6 @@ enum switchdev_attr_id {
        SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
        SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
 #if IS_ENABLED(CONFIG_BRIDGE_MRP)
-       SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
        SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
 #endif
 };
@@ -62,7 +61,6 @@ struct switchdev_attr {
                u16 vlan_protocol;                      /* BRIDGE_VLAN_PROTOCOL */
                bool mc_disabled;                       /* MC_DISABLED */
 #if IS_ENABLED(CONFIG_BRIDGE_MRP)
-               u8 mrp_port_state;                      /* MRP_PORT_STATE */
                u8 mrp_port_role;                       /* MRP_PORT_ROLE */
 #endif
        } u;
index 2f4cd3288bccc5ab86e0c7584d29efc41a315a26..c34b9ccb64722df8612232eaea34756d52dbff51 100644 (file)
@@ -709,6 +709,7 @@ struct ocelot_policer {
 /* I/O */
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
 u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
 void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
 void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
@@ -737,6 +738,7 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
 int ocelot_get_ts_info(struct ocelot *ocelot, int port,
                       struct ethtool_ts_info *info);
 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
+int ocelot_port_flush(struct ocelot *ocelot, int port);
 void ocelot_adjust_link(struct ocelot *ocelot, int port,
                        struct phy_device *phydev);
 int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
index aea96b6384734905dff81aa6cb351d752612c15a..bfafbf115bf306a2712c22dfb4eae1681c36fff1 100644 (file)
@@ -115,6 +115,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 
        /* hash table size must be power of 2 */
        n_buckets = roundup_pow_of_two(attr->max_entries);
+       if (!n_buckets)
+               return ERR_PTR(-E2BIG);
 
        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
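
The new check guards an arithmetic wrap: on 32-bit configurations, rounding a max_entries value above 2^31 up to the next power of two overflows to zero, after which a zero-bucket table would be sized and allocated as if valid. A userspace illustration of the wrap (a sketch; the kernel helper operates on unsigned long):

#include <stdint.h>
#include <stdio.h>

/* Classic round-up-to-power-of-two on 32 bits; wraps to 0 when the
 * input exceeds 0x80000000, which is exactly the guarded case. */
static uint32_t roundup_pow2_32(uint32_t v)
{
        v--;
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16;
        return v + 1;
}

int main(void)
{
        printf("%u\n", roundup_pow2_32(0x80000001u)); /* prints 0 */
        return 0;
}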
index e7368c5eacb7bdc508099841bc447612624faa7c..37581919e050c8fc63afb74aa9812d7e40df8eea 100644 (file)
@@ -6877,7 +6877,7 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
        case BPF_JSGT:
                if (reg->s32_min_value > sval)
                        return 1;
-               else if (reg->s32_max_value < sval)
+               else if (reg->s32_max_value <= sval)
                        return 0;
                break;
        case BPF_JLT:
@@ -6950,7 +6950,7 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
        case BPF_JSGT:
                if (reg->smin_value > sval)
                        return 1;
-               else if (reg->smax_value < sval)
+               else if (reg->smax_value <= sval)
                        return 0;
                break;
        case BPF_JLT:
@@ -8590,7 +8590,11 @@ static bool range_within(struct bpf_reg_state *old,
        return old->umin_value <= cur->umin_value &&
               old->umax_value >= cur->umax_value &&
               old->smin_value <= cur->smin_value &&
-              old->smax_value >= cur->smax_value;
+              old->smax_value >= cur->smax_value &&
+              old->u32_min_value <= cur->u32_min_value &&
+              old->u32_max_value >= cur->u32_max_value &&
+              old->s32_min_value <= cur->s32_min_value &&
+              old->s32_max_value >= cur->s32_max_value;
 }
 
 /* Maximum number of register states that can exist at once */
@@ -10999,30 +11003,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
                    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
                        bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
-                       struct bpf_insn mask_and_div[] = {
-                               BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+                       bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+                       struct bpf_insn *patchlet;
+                       struct bpf_insn chk_and_div[] = {
                                /* Rx div 0 -> 0 */
-                               BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+                               BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                            BPF_JNE | BPF_K, insn->src_reg,
+                                            0, 2, 0),
                                BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
                                *insn,
                        };
-                       struct bpf_insn mask_and_mod[] = {
-                               BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+                       struct bpf_insn chk_and_mod[] = {
                                /* Rx mod 0 -> Rx */
-                               BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+                               BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                            BPF_JEQ | BPF_K, insn->src_reg,
+                                            0, 1, 0),
                                *insn,
                        };
-                       struct bpf_insn *patchlet;
 
-                       if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
-                           insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
-                               patchlet = mask_and_div + (is64 ? 1 : 0);
-                               cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
-                       } else {
-                               patchlet = mask_and_mod + (is64 ? 1 : 0);
-                               cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
-                       }
+                       patchlet = isdiv ? chk_and_div : chk_and_mod;
+                       cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+                                     ARRAY_SIZE(chk_and_mod);
 
                        new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
                        if (!new_prog)
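
The old patchlets truncated the source register with a 32-bit move before the zero test, clobbering its upper half as a side effect even though a 32-bit divide only reads the low bits; the rewrite tests for zero with a JMP32 or JMP compare at the operation's real width and leaves the register intact. The runtime semantics being enforced are BPF's defined div/mod-by-zero behavior, roughly (a sketch, not verifier code):

#include <stdint.h>

/* BPF defines division by zero as 0 and modulo by zero as a no-op
 * on the destination register. */
static uint64_t bpf_div64(uint64_t dst, uint64_t src)
{
        return src ? dst / src : 0;
}

static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
{
        return src ? dst % src : dst;
}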
index 6c0018abe68a026f5f340a87659449dd95224301..764400260eb60d71ac15bd8a49b8e98b2237acf1 100644 (file)
@@ -96,9 +96,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 {
        unsigned int ret;
 
-       if (in_nmi()) /* not supported yet */
-               return 1;
-
        cant_sleep();
 
        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
index e9d28eeccb7e69cfd7a6fd4109d02a4c12d19d24..d387b774ceeb6ea9324d73df54c4ec929c5e86da 100644 (file)
@@ -1212,7 +1212,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
-               if (!trace_event_name(call) || !call->class || !call->class->reg)
+               if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+                   !trace_event_name(call) || !call->class || !call->class->reg)
                        continue;
 
                if (system && strcmp(call->class->system, system->name) != 0)
index a21e6a5792c5a53fcdb3ffd29328d9d5f6d5afad..f0b2ccb1bb0182694733683501b55dc4d7dc2bb2 100644 (file)
@@ -592,14 +592,15 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
 }
 
 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
-                               __wsum *csum, struct iov_iter *i)
+                                        struct csum_state *csstate,
+                                        struct iov_iter *i)
 {
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_mask = pipe->ring_size - 1;
+       __wsum sum = csstate->csum;
+       size_t off = csstate->off;
        unsigned int i_head;
        size_t n, r;
-       size_t off = 0;
-       __wsum sum = *csum;
 
        if (!sanity(i))
                return 0;
@@ -621,7 +622,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                i_head++;
        } while (n);
        i->count -= bytes;
-       *csum = sum;
+       csstate->csum = sum;
+       csstate->off = off;
        return bytes;
 }
 
@@ -1522,18 +1524,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
 
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
                             struct iov_iter *i)
 {
+       struct csum_state *csstate = _csstate;
        const char *from = addr;
-       __wsum *csum = csump;
        __wsum sum, next;
-       size_t off = 0;
+       size_t off;
 
        if (unlikely(iov_iter_is_pipe(i)))
-               return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+               return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
 
-       sum = *csum;
+       sum = csstate->csum;
+       off = csstate->off;
        if (unlikely(iov_iter_is_discard(i))) {
                WARN_ON(1);     /* for now */
                return 0;
@@ -1561,7 +1564,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
                off += v.iov_len;
        })
        )
-       *csum = sum;
+       csstate->csum = sum;
+       csstate->off = off;
        return bytes;
 }
 EXPORT_SYMBOL(csum_and_copy_to_iter);
index e529428e7a1110600badb017ff468c219d944def..d558799b25b3ce3bd7a6d07baf94e9f0b637007c 100644 (file)
@@ -134,12 +134,8 @@ void __init kasan_init_hw_tags(void)
 
        switch (kasan_arg_stacktrace) {
        case KASAN_ARG_STACKTRACE_DEFAULT:
-               /*
-                * Default to enabling stack trace collection for
-                * debug kernels.
-                */
-               if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
-                       static_branch_enable(&kasan_flag_stacktrace);
+               /* Default to enabling stack trace collection. */
+               static_branch_enable(&kasan_flag_stacktrace);
                break;
        case KASAN_ARG_STACKTRACE_OFF:
                /* Do nothing, kasan_flag_stacktrace keeps its default value. */
index e2de77b5bcc2fb2afe109447ec754e17b4b17d7c..913c2b9e5c72d9e034fff6c9c390c321fb2b6032 100644 (file)
@@ -6271,6 +6271,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
        if (err)
                return err;
 
+       page_counter_set_high(&memcg->memory, high);
+
        for (;;) {
                unsigned long nr_pages = page_counter_read(&memcg->memory);
                unsigned long reclaimed;
@@ -6294,10 +6296,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
                        break;
        }
 
-       page_counter_set_high(&memcg->memory, high);
-
        memcg_wb_domain_size_changed(memcg);
-
        return nbytes;
 }
 
index f554320281ccd830fd171c04d33e970e158d30fa..aa63bfd3cad271c0a1a167cba36026981692bd77 100644 (file)
@@ -336,8 +336,9 @@ enum pgt_entry {
  * valid. Else returns a smaller extent bounded by the end of the source and
  * destination pgt_entry.
  */
-static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
-                       unsigned long old_end, unsigned long new_addr)
+static __always_inline unsigned long get_extent(enum pgt_entry entry,
+                       unsigned long old_addr, unsigned long old_end,
+                       unsigned long new_addr)
 {
        unsigned long next, extent, mask, size;
 
index 7ecbbbe5bc0c1fe65e3fc52b881205b18189b771..b22a4b101c846ea651e34b271280803a45fee867 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3423,6 +3423,7 @@ static inline int calculate_order(unsigned int size)
        unsigned int order;
        unsigned int min_objects;
        unsigned int max_objects;
+       unsigned int nr_cpus;
 
        /*
         * Attempt to find best configuration for a slab. This
@@ -3433,8 +3434,21 @@ static inline int calculate_order(unsigned int size)
         * we reduce the minimum objects required in a slab.
         */
        min_objects = slub_min_objects;
-       if (!min_objects)
-               min_objects = 4 * (fls(num_online_cpus()) + 1);
+       if (!min_objects) {
+               /*
+                * Some architectures will only update present cpus when
+                * onlining them, so don't trust the number if it's just 1. But
+                * we also don't want to use nr_cpu_ids always, as on some other
+                * architectures, there can be many possible cpus, but never
+                * onlined. Here we compromise between trying to avoid too high
+                * order on systems that appear larger than they are, and too
+                * low order on systems that appear smaller than they are.
+                */
+               nr_cpus = num_present_cpus();
+               if (nr_cpus <= 1)
+                       nr_cpus = nr_cpu_ids;
+               min_objects = 4 * (fls(nr_cpus) + 1);
+       }
        max_objects = order_objects(slub_max_order, size);
        min_objects = min(min_objects, max_objects);
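
For scale, the heuristic lands where you would expect: a machine that reports one present CPU but has nr_cpu_ids == 64 now sizes min_objects as 4 * (fls(64) + 1) = 32 instead of 4 * (fls(1) + 1) = 8. A quick sketch of the arithmetic (fls() returns the 1-based index of the highest set bit):

#include <stdio.h>

/* 1-based index of the most significant set bit, 0 for 0, as fls(). */
static int fls32(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        printf("%d\n", 4 * (fls32(1) + 1));   /* 8  */
        printf("%d\n", 4 * (fls32(64) + 1));  /* 32 */
        return 0;
}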
 
index cec2c4e4561d06540a31a47ca6ae1a8f47c168b2..5aeae6ad17b37d8beac099bfd7db44b841941516 100644 (file)
@@ -557,19 +557,22 @@ int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
 int br_mrp_set_port_state(struct net_bridge_port *p,
                          enum br_mrp_port_state_type state)
 {
+       u32 port_state;
+
        if (!p || !(p->flags & BR_MRP_AWARE))
                return -EINVAL;
 
        spin_lock_bh(&p->br->lock);
 
        if (state == BR_MRP_PORT_STATE_FORWARDING)
-               p->state = BR_STATE_FORWARDING;
+               port_state = BR_STATE_FORWARDING;
        else
-               p->state = BR_STATE_BLOCKING;
+               port_state = BR_STATE_BLOCKING;
 
+       p->state = port_state;
        spin_unlock_bh(&p->br->lock);
 
-       br_mrp_port_switchdev_set_state(p, state);
+       br_mrp_port_switchdev_set_state(p, port_state);
 
        return 0;
 }
index ed547e03ace173565b3ceaaf9d371bf40197931a..75a7e8d0a26853a2f5f167f89b4255915b0c26f3 100644 (file)
@@ -169,13 +169,12 @@ int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
        return err;
 }
 
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
-                                   enum br_mrp_port_state_type state)
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state)
 {
        struct switchdev_attr attr = {
                .orig_dev = p->dev,
-               .id = SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
-               .u.mrp_port_state = state,
+               .id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+               .u.stp_state = state,
        };
        int err;
 
index 32a48e5418dac67af8daf4dcf9b4312ab89c321f..2514954c14316c00c5cb2a1e2dd0f49f3ccadc25 100644 (file)
@@ -72,8 +72,7 @@ int br_mrp_switchdev_set_ring_state(struct net_bridge *br, struct br_mrp *mrp,
 int br_mrp_switchdev_send_ring_test(struct net_bridge *br, struct br_mrp *mrp,
                                    u32 interval, u8 max_miss, u32 period,
                                    bool monitor);
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
-                                   enum br_mrp_port_state_type state);
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state);
 int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
                                   enum br_mrp_port_role_type role);
 int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
index 81809fa735a78000b88b60b860615267a94b6497..15ab9ffb27fe999c9038d67d822de715d1e61baa 100644 (file)
@@ -721,8 +721,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      struct iov_iter *to, int len,
                                      __wsum *csump)
 {
-       return __skb_datagram_iter(skb, offset, to, len, true,
-                       csum_and_copy_to_iter, csump);
+       struct csum_state csdata = { .csum = *csump };
+       int ret;
+
+       ret = __skb_datagram_iter(skb, offset, to, len, true,
+                                 csum_and_copy_to_iter, &csdata);
+       if (ret)
+               return ret;
+
+       *csump = csdata.csum;
+       return 0;
 }
 
 /**
index a979b86dbacda9dfe31dd8b269024f7f0f5a8ef1..449b45b843d40ece7dd1e2ed6a5996ee1db9f591 100644 (file)
@@ -5735,10 +5735,11 @@ static void gro_normal_list(struct napi_struct *napi)
 /* Queue one GRO_NORMAL SKB up for list processing. If the batch size is exceeded,
  * pass the whole batch up to the stack.
  */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
 {
        list_add_tail(&skb->list, &napi->rx_list);
-       if (++napi->rx_count >= gro_normal_batch)
+       napi->rx_count += segs;
+       if (napi->rx_count >= gro_normal_batch)
                gro_normal_list(napi);
 }
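
Counted in segments, a single coalesced SKB carrying, say, 17 MSS-sized segments now meets a gro_normal_batch of 8 on its own and flushes the list immediately; counted per SKB, as before, eight such super-packets (potentially around 64KB each) could accumulate before anything reached the stack, inflating latency on the batching path.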
 
@@ -5777,7 +5778,7 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
        }
 
 out:
-       gro_normal_one(napi, skb);
+       gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
        return NET_RX_SUCCESS;
 }
 
@@ -6067,7 +6068,7 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
 {
        switch (ret) {
        case GRO_NORMAL:
-               gro_normal_one(napi, skb);
+               gro_normal_one(napi, skb, 1);
                break;
 
        case GRO_DROP:
@@ -6155,7 +6156,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
                if (ret == GRO_NORMAL)
-                       gro_normal_one(napi, skb);
+                       gro_normal_one(napi, skb, 1);
                break;
 
        case GRO_DROP:
index a47e0f9b20d0a98e04486b4104e90b41122eca36..a04fd637b4cdcc3466f09bbdf2e80b5b41394e60 100644 (file)
@@ -462,20 +462,23 @@ static int dsa_switch_setup(struct dsa_switch *ds)
                ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
                if (!ds->slave_mii_bus) {
                        err = -ENOMEM;
-                       goto unregister_notifier;
+                       goto teardown;
                }
 
                dsa_slave_mii_bus_init(ds);
 
                err = mdiobus_register(ds->slave_mii_bus);
                if (err < 0)
-                       goto unregister_notifier;
+                       goto teardown;
        }
 
        ds->setup = true;
 
        return 0;
 
+teardown:
+       if (ds->ops->teardown)
+               ds->ops->teardown(ds);
 unregister_notifier:
        dsa_switch_unregister_notifier(ds);
 unregister_devlink_ports:
index cd9a9bd242babbc1bb8e3fdbbe88fc0c9b5b5c85..51ec8256b7fa9e45455e10c0f16448c12e87b4dd 100644 (file)
@@ -69,7 +69,7 @@ config MAC80211_MESH
 config MAC80211_LEDS
        bool "Enable LED triggers"
        depends on MAC80211
-       depends on LEDS_CLASS
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select LEDS_TRIGGERS
        help
          This option enables a few LED triggers for different
index 234b7cab37c301cd66025e43ffa5d9ce2e50ae47..ff0168736f6ea7309639bab9e73c0d72d7044203 100644 (file)
@@ -1229,7 +1229,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         * Let nf_ct_resolve_clash() deal with this later.
                         */
                        if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-                                             &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+                                             &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+                                             nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
                                continue;
 
                        NF_CT_STAT_INC_ATOMIC(net, found);
index 513f78db3cb2f0812956b06e89e329b3f6243700..4a4acbba78ff77292c2ddfa24e0ed7e7bde1090e 100644 (file)
@@ -399,7 +399,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                return -1;
 
        tcph = (void *)(skb_network_header(skb) + thoff);
-       inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+       inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
 
        return 0;
 }
@@ -415,7 +415,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace2(&udph->check, skb, port,
-                                        new_port, true);
+                                        new_port, false);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }
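
The final argument of inet_proto_csum_replace2() states whether the rewritten field also appears in the pseudo-header; ports do not, so passing true made the helper adjust checksum state reserved for pseudo-header changes and corrupted CHECKSUM_PARTIAL packets. The underlying arithmetic is the RFC 1624 incremental update, HC' = ~(~HC + ~m + m'); a sketch for a 16-bit field swap:

#include <stdint.h>

/* Incrementally update a ones'-complement checksum after replacing
 * one 16-bit field 'from' with 'to' (RFC 1624, eqn. 3). */
static uint16_t csum_replace2(uint16_t check, uint16_t from, uint16_t to)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~from + to;
        while (sum >> 16)                 /* fold end-around carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}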
index 8d3aa97b52e71950ed34eacb9dc69d0cee1ab7ef..8ee9f40cc0ea2816839a3c7264fcb65eebd5c9e7 100644 (file)
@@ -5281,6 +5281,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {};
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
        u8 genmask = nft_genmask_next(ctx->net);
+       u32 flags = 0, size = 0, num_exprs = 0;
        struct nft_set_ext_tmpl tmpl;
        struct nft_set_ext *ext, *ext2;
        struct nft_set_elem elem;
@@ -5290,7 +5291,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        struct nft_data_desc desc;
        enum nft_registers dreg;
        struct nft_trans *trans;
-       u32 flags = 0, size = 0;
        u64 timeout;
        u64 expiration;
        int err, i;
@@ -5356,7 +5356,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        if (nla[NFTA_SET_ELEM_EXPR]) {
                struct nft_expr *expr;
 
-               if (set->num_exprs != 1)
+               if (set->num_exprs && set->num_exprs != 1)
                        return -EOPNOTSUPP;
 
                expr = nft_set_elem_expr_alloc(ctx, set,
@@ -5365,8 +5365,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        return PTR_ERR(expr);
 
                expr_array[0] = expr;
+               num_exprs = 1;
 
-               if (set->exprs[0] && set->exprs[0]->ops != expr->ops) {
+               if (set->num_exprs && set->exprs[0]->ops != expr->ops) {
                        err = -EOPNOTSUPP;
                        goto err_set_elem_expr;
                }
@@ -5375,12 +5376,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                struct nlattr *tmp;
                int left;
 
-               if (set->num_exprs == 0)
-                       return -EOPNOTSUPP;
-
                i = 0;
                nla_for_each_nested(tmp, nla[NFTA_SET_ELEM_EXPRESSIONS], left) {
-                       if (i == set->num_exprs) {
+                       if (i == NFT_SET_EXPR_MAX ||
+                           (set->num_exprs && set->num_exprs == i)) {
                                err = -E2BIG;
                                goto err_set_elem_expr;
                        }
@@ -5394,14 +5393,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                goto err_set_elem_expr;
                        }
                        expr_array[i] = expr;
+                       num_exprs++;
 
-                       if (expr->ops != set->exprs[i]->ops) {
+                       if (set->num_exprs && expr->ops != set->exprs[i]->ops) {
                                err = -EOPNOTSUPP;
                                goto err_set_elem_expr;
                        }
                        i++;
                }
-               if (set->num_exprs != i) {
+               if (set->num_exprs && set->num_exprs != i) {
                        err = -EOPNOTSUPP;
                        goto err_set_elem_expr;
                }
@@ -5409,6 +5409,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                err = nft_set_elem_expr_clone(ctx, set, expr_array);
                if (err < 0)
                        goto err_set_elem_expr_clone;
+
+               num_exprs = set->num_exprs;
        }
 
        err = nft_setelem_parse_key(ctx, set, &elem.key.val,
@@ -5433,8 +5435,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
        }
 
-       if (set->num_exprs) {
-               for (i = 0; i < set->num_exprs; i++)
+       if (num_exprs) {
+               for (i = 0; i < num_exprs; i++)
                        size += expr_array[i]->ops->size;
 
                nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
@@ -5522,7 +5524,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                *nft_set_ext_obj(ext) = obj;
                obj->use++;
        }
-       for (i = 0; i < set->num_exprs; i++)
+       for (i = 0; i < num_exprs; i++)
                nft_set_elem_expr_setup(ext, i, expr_array);
 
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
@@ -5584,7 +5586,7 @@ err_parse_key_end:
 err_parse_key:
        nft_data_release(&elem.key.val, NFT_DATA_VALUE);
 err_set_elem_expr:
-       for (i = 0; i < set->num_exprs && expr_array[i]; i++)
+       for (i = 0; i < num_exprs && expr_array[i]; i++)
                nft_expr_destroy(ctx, expr_array[i]);
 err_set_elem_expr_clone:
        return err;
@@ -8949,6 +8951,17 @@ int __nft_release_basechain(struct nft_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(__nft_release_basechain);
 
+static void __nft_release_hooks(struct net *net)
+{
+       struct nft_table *table;
+       struct nft_chain *chain;
+
+       list_for_each_entry(table, &net->nft.tables, list) {
+               list_for_each_entry(chain, &table->chains, list)
+                       nf_tables_unregister_hook(net, table, chain);
+       }
+}
+
 static void __nft_release_tables(struct net *net)
 {
        struct nft_flowtable *flowtable, *nf;
@@ -8964,10 +8977,6 @@ static void __nft_release_tables(struct net *net)
 
        list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
                ctx.family = table->family;
-
-               list_for_each_entry(chain, &table->chains, list)
-                       nf_tables_unregister_hook(net, table, chain);
-               /* No packets are walking on these chains anymore. */
                ctx.table = table;
                list_for_each_entry(chain, &table->chains, list) {
                        ctx.chain = chain;
@@ -9016,6 +9025,11 @@ static int __net_init nf_tables_init_net(struct net *net)
        return 0;
 }
 
+static void __net_exit nf_tables_pre_exit_net(struct net *net)
+{
+       __nft_release_hooks(net);
+}
+
 static void __net_exit nf_tables_exit_net(struct net *net)
 {
        mutex_lock(&net->nft.commit_mutex);
@@ -9029,8 +9043,9 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 }
 
 static struct pernet_operations nf_tables_net_ops = {
-       .init   = nf_tables_init_net,
-       .exit   = nf_tables_exit_net,
+       .init           = nf_tables_init_net,
+       .pre_exit       = nf_tables_pre_exit_net,
+       .exit           = nf_tables_exit_net,
 };
 
 static int __init nf_tables_module_init(void)
index 606411869698e00067f99c0ce56a7d3657b05f82..0446307516cdff3b9f219808b95a8a241a82e29b 100644 (file)
@@ -152,7 +152,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
 /*
  * Drop entries with timestamps older than 'time'.
  */
-static void recent_entry_reap(struct recent_table *t, unsigned long time)
+static void recent_entry_reap(struct recent_table *t, unsigned long time,
+                             struct recent_entry *working, bool update)
 {
        struct recent_entry *e;
 
@@ -161,6 +162,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
         */
        e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
 
+       /*
+        * Do not reap the entry which is going to be updated.
+        */
+       if (e == working && update)
+               return;
+
        /*
         * The last time stamp is the most recent.
         */
@@ -303,7 +310,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
                /* info->seconds must be non-zero */
                if (info->check_set & XT_RECENT_REAP)
-                       recent_entry_reap(t, time);
+                       recent_entry_reap(t, time, e,
+                               info->check_set & XT_RECENT_UPDATE && ret);
        }
 
        if (info->check_set & XT_RECENT_SET ||
index 15ce9b642b25f380bc7dba1cd1d4bb2ecd550ee0..b238c40a998422cb9d889db5360bdf73f04e2885 100644 (file)
@@ -80,6 +80,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t ret;
        void *kbuf;
 
+       if (!len)
+               return -EINVAL;
+
+       if (len > KMALLOC_MAX_SIZE)
+               return -ENOMEM;
+
        kbuf = kzalloc(len, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;
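
Both new checks close real holes: a zero-length write previously reached kzalloc(0), whose ZERO_SIZE_PTR-style result the subsequent copy then scribbled over, and an oversized length let userspace drive huge allocation attempts. The pattern, sketched standalone (MAX_MSG_LEN here is an illustrative cap, not the kernel's KMALLOC_MAX_SIZE):

#include <errno.h>
#include <stdlib.h>

#define MAX_MSG_LEN (1u << 22)

/* Validate a user-controlled length before allocating for it,
 * distinguishing "nonsense" (-EINVAL) from "too big" (-ENOMEM). */
static void *alloc_msg(size_t len, int *err)
{
        if (len == 0) {
                *err = -EINVAL;
                return NULL;
        }
        if (len > MAX_MSG_LEN) {
                *err = -ENOMEM;
                return NULL;
        }
        return calloc(1, len);
}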
index c845594b663fb770da85d9b276c156822e08edda..4eb91d958a48d3f89c17bcf1d43eadc57a748d70 100644 (file)
@@ -548,8 +548,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
                rxrpc_disconnect_call(call);
        if (call->security)
                call->security->free_call_crypto(call);
-
-       rxrpc_cleanup_ring(call);
        _leave("");
 }
 
index f7da88ae20a57e5db04e54bd5eea243143484d02..982a87b3e11f8f6edde79b3767629f0599789e1e 100644 (file)
@@ -215,6 +215,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
        struct sctp_ht_iter *iter = seq->private;
 
+       if (v && v != SEQ_START_TOKEN) {
+               struct sctp_transport *transport = v;
+
+               sctp_transport_put(transport);
+       }
+
        sctp_transport_walk_stop(&iter->hti);
 }
 
@@ -222,6 +228,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct sctp_ht_iter *iter = seq->private;
 
+       if (v && v != SEQ_START_TOKEN) {
+               struct sctp_transport *transport = v;
+
+               sctp_transport_put(transport);
+       }
+
        ++*pos;
 
        return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
@@ -277,8 +289,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                sk->sk_rcvbuf);
        seq_printf(seq, "\n");
 
-       sctp_transport_put(transport);
-
        return 0;
 }
 
@@ -354,8 +364,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "\n");
        }
 
-       sctp_transport_put(transport);
-
        return 0;
 }
 
index 6894f21dc147557ed1540120b1045d6e9d9956f3..5546710d8ac1a52fbabbac9e27d0ea687875ace8 100644 (file)
@@ -943,10 +943,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
         */
 
        sk = sock->sk;
+
+       lock_sock(sk);
        if (sock->state == SS_UNCONNECTED) {
                err = -ENOTCONN;
                if (sk->sk_type == SOCK_STREAM)
-                       return err;
+                       goto out;
        } else {
                sock->state = SS_DISCONNECTING;
                err = 0;
@@ -955,10 +957,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
        /* Receive and send shutdowns are treated alike. */
        mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
        if (mode) {
-               lock_sock(sk);
                sk->sk_shutdown |= mode;
                sk->sk_state_change(sk);
-               release_sock(sk);
 
                if (sk->sk_type == SOCK_STREAM) {
                        sock_reset_flag(sk, SOCK_DONE);
@@ -966,6 +966,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
                }
        }
 
+out:
+       release_sock(sk);
        return err;
 }
 
@@ -1233,7 +1235,7 @@ static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
 {
        const struct vsock_transport *transport = vsk->transport;
 
-       if (!transport->cancel_pkt)
+       if (!transport || !transport->cancel_pkt)
                return -EOPNOTSUPP;
 
        return transport->cancel_pkt(vsk);
@@ -1243,7 +1245,6 @@ static void vsock_connect_timeout(struct work_struct *work)
 {
        struct sock *sk;
        struct vsock_sock *vsk;
-       int cancel = 0;
 
        vsk = container_of(work, struct vsock_sock, connect_work.work);
        sk = sk_vsock(vsk);
@@ -1254,11 +1255,9 @@ static void vsock_connect_timeout(struct work_struct *work)
                sk->sk_state = TCP_CLOSE;
                sk->sk_err = ETIMEDOUT;
                sk->sk_error_report(sk);
-               cancel = 1;
+               vsock_transport_cancel_pkt(vsk);
        }
        release_sock(sk);
-       if (cancel)
-               vsock_transport_cancel_pkt(vsk);
 
        sock_put(sk);
 }
index 630b851f8150fd2e67eac5187d75a1771b82f068..cc3bae2659e7916fcda89d2af2637894c49a59f4 100644 (file)
@@ -474,14 +474,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
 
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
-       struct sock *sk = sk_vsock(vsk);
-
        if (!(mode & SEND_SHUTDOWN))
                return 0;
 
-       lock_sock(sk);
        hvs_shutdown_lock_held(vsk->trans, mode);
-       release_sock(sk);
        return 0;
 }
 
index 5956939eebb780ce1f1e17e1a15b2a85808f295c..e4370b1b7494760b1ea8fd806afd0c95d4acdadc 100644 (file)
@@ -1130,8 +1130,6 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 
        vsk = vsock_sk(sk);
 
-       space_available = virtio_transport_space_update(sk, pkt);
-
        lock_sock(sk);
 
        /* Check if sk has been closed before lock_sock */
@@ -1142,6 +1140,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
                goto free_pkt;
        }
 
+       space_available = virtio_transport_space_update(sk, pkt);
+
        /* Update CID in case it has changed after a transport reset event */
        vsk->local_addr.svm_cid = dst.svm_cid;
 
index e0088c2d38a5d7ecf952bc8deb931c29b615d52b..426d07875a48e4676a204d6cda1cb85aff30248f 100644 (file)
@@ -133,7 +133,10 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)
 
 FIXTURE_SETUP(tls)
 {
-       union tls_crypto_context tls12;
+       union {
+               struct tls12_crypto_info_aes_gcm_128 aes128;
+               struct tls12_crypto_info_chacha20_poly1305 chacha20;
+       } tls12;
        struct sockaddr_in addr;
        socklen_t len;
        int sfd, ret;
@@ -143,14 +146,16 @@ FIXTURE_SETUP(tls)
        len = sizeof(addr);
 
        memset(&tls12, 0, sizeof(tls12));
-       tls12.info.version = variant->tls_version;
-       tls12.info.cipher_type = variant->cipher_type;
        switch (variant->cipher_type) {
        case TLS_CIPHER_CHACHA20_POLY1305:
-               tls12_sz = sizeof(tls12_crypto_info_chacha20_poly1305);
+               tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+               tls12.chacha20.info.version = variant->tls_version;
+               tls12.chacha20.info.cipher_type = variant->cipher_type;
                break;
        case TLS_CIPHER_AES_GCM_128:
-               tls12_sz = sizeof(tls12_crypto_info_aes_gcm_128);
+               tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
+               tls12.aes128.info.version = variant->tls_version;
+               tls12.aes128.info.cipher_type = variant->cipher_type;
                break;
        default:
                tls12_sz = 0;
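
union tls_crypto_context lives in the kernel's internal net/tls.h rather than the exported UAPI header, so the fixture could not build against plain distro headers; the fix declares an on-stack union of the two UAPI structs and initializes only the variant actually selected. For reference, typical userspace setup with those structs looks roughly like this (a sketch with zeroed key material and no error handling):

#include <linux/tls.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_tls_tx(int fd)
{
        struct tls12_crypto_info_aes_gcm_128 ci = {
                .info.version = TLS_1_2_VERSION,
                .info.cipher_type = TLS_CIPHER_AES_GCM_128,
                /* key/iv/salt/rec_seq fields left zeroed for the sketch */
        };

        if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
                return -1;
        return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}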
index 490a8cca708a81811a09b6ccf7ecc690ea02d60a..fabb1d555ee5c00a0cafea97d371ca442be78530 100644 (file)
@@ -26,6 +26,7 @@
 #include <inttypes.h>
 #include <linux/errqueue.h>
 #include <linux/if_ether.h>
+#include <linux/if_packet.h>
 #include <linux/ipv6.h>
 #include <linux/net_tstamp.h>
 #include <netdb.h>
@@ -34,7 +35,6 @@
 #include <netinet/ip.h>
 #include <netinet/udp.h>
 #include <netinet/tcp.h>
-#include <netpacket/packet.h>
 #include <poll.h>
 #include <stdarg.h>
 #include <stdbool.h>
@@ -495,12 +495,12 @@ static void do_test(int family, unsigned int report_opt)
        total_len = cfg_payload_len;
        if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
                total_len += sizeof(struct udphdr);
-               if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW)
+               if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) {
                        if (family == PF_INET)
                                total_len += sizeof(struct iphdr);
                        else
                                total_len += sizeof(struct ipv6hdr);
-
+               }
                /* special case, only rawv6_sendmsg:
                 * pass proto in sin6_port if not connected
                 * also see ANK comment in net/ipv4/raw.c
index 087f0e6e71ce7472c7b788056087a3a477d4a23b..f33154c04d3441ab4e2f9aba189b899d7ff441cd 100755 (executable)
@@ -23,7 +23,7 @@ ip -net "$ns0" addr add 127.0.0.1 dev lo
 
 trap cleanup EXIT
 
-currentyear=$(date +%G)
+currentyear=$(date +%Y)
 lastyear=$((currentyear-1))
 ip netns exec "$ns0" nft -f /dev/stdin <<EOF
 table inet filter {
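
For the record, %G is the ISO 8601 week-based year, which disagrees with the calendar year %Y around New Year: GNU date -d 2021-01-01 +%G prints 2020 (the day falls in ISO week 53 of 2020) while +%Y prints 2021, which is exactly when this test's year arithmetic would have gone wrong.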
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
deleted file mode 100755 (executable)
index e953f3c..0000000
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#please run as root
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-mnt=./huge
-exitcode=0
-
-#get huge pagesize and freepages from /proc/meminfo
-while read name size unit; do
-       if [ "$name" = "HugePages_Free:" ]; then
-               freepgs=$size
-       fi
-       if [ "$name" = "Hugepagesize:" ]; then
-               hpgsize_KB=$size
-       fi
-done < /proc/meminfo
-
-# Simple hugetlbfs tests have a hardcoded minimum requirement of
-# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
-# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
-# both of these requirements into account and attempt to increase
-# number of huge pages available.
-nr_cpus=$(nproc)
-hpgsize_MB=$((hpgsize_KB / 1024))
-half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
-needmem_KB=$((half_ufd_size_MB * 2 * 1024))
-
-#set proper nr_hugepages
-if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
-       nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
-       needpgs=$((needmem_KB / hpgsize_KB))
-       tries=2
-       while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
-               lackpgs=$(( $needpgs - $freepgs ))
-               echo 3 > /proc/sys/vm/drop_caches
-               echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
-               if [ $? -ne 0 ]; then
-                       echo "Please run this test as root"
-                       exit $ksft_skip
-               fi
-               while read name size unit; do
-                       if [ "$name" = "HugePages_Free:" ]; then
-                               freepgs=$size
-                       fi
-               done < /proc/meminfo
-               tries=$((tries - 1))
-       done
-       if [ $freepgs -lt $needpgs ]; then
-               printf "Not enough huge pages available (%d < %d)\n" \
-                      $freepgs $needpgs
-               exit 1
-       fi
-else
-       echo "no hugetlbfs support in kernel?"
-       exit 1
-fi
-
-#filter 64bit architectures
-ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
-if [ -z $ARCH ]; then
-  ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
-fi
-VADDR64=0
-echo "$ARCH64STR" | grep $ARCH && VADDR64=1
-
-mkdir $mnt
-mount -t hugetlbfs none $mnt
-
-echo "---------------------"
-echo "running hugepage-mmap"
-echo "---------------------"
-./hugepage-mmap
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-shmmax=`cat /proc/sys/kernel/shmmax`
-shmall=`cat /proc/sys/kernel/shmall`
-echo 268435456 > /proc/sys/kernel/shmmax
-echo 4194304 > /proc/sys/kernel/shmall
-echo "--------------------"
-echo "running hugepage-shm"
-echo "--------------------"
-./hugepage-shm
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-echo $shmmax > /proc/sys/kernel/shmmax
-echo $shmall > /proc/sys/kernel/shmall
-
-echo "-------------------"
-echo "running map_hugetlb"
-echo "-------------------"
-./map_hugetlb
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "NOTE: The above hugetlb tests provide minimal coverage.  Use"
-echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
-echo "      hugetlb regression testing."
-
-echo "---------------------------"
-echo "running map_fixed_noreplace"
-echo "---------------------------"
-./map_fixed_noreplace
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "------------------------------------------------------"
-echo "running: gup_test -u # get_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -u
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "------------------------------------------------------"
-echo "running: gup_test -a # pin_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -a
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "------------------------------------------------------------"
-echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
-echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
-echo "------------------------------------------------------------"
-./gup_test -ct -F 0x1 0 19 0x1000
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "-------------------"
-echo "running userfaultfd"
-echo "-------------------"
-./userfaultfd anon 20 16
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "---------------------------"
-echo "running userfaultfd_hugetlb"
-echo "---------------------------"
-# Test requires source and destination huge pages.  Size of source
-# (half_ufd_size_MB) is passed as argument to test.
-./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-rm -f $mnt/ufd_test_file
-
-echo "-------------------------"
-echo "running userfaultfd_shmem"
-echo "-------------------------"
-./userfaultfd shmem 20 16
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-#cleanup
-umount $mnt
-rm -rf $mnt
-echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
-
-echo "-----------------------"
-echo "running compaction_test"
-echo "-----------------------"
-./compaction_test
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "----------------------"
-echo "running on-fault-limit"
-echo "----------------------"
-sudo -u nobody ./on-fault-limit
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "--------------------"
-echo "running map_populate"
-echo "--------------------"
-./map_populate
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "-------------------------"
-echo "running mlock-random-test"
-echo "-------------------------"
-./mlock-random-test
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "--------------------"
-echo "running mlock2-tests"
-echo "--------------------"
-./mlock2-tests
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "-------------------"
-echo "running mremap_test"
-echo "-------------------"
-./mremap_test
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "-----------------"
-echo "running thuge-gen"
-echo "-----------------"
-./thuge-gen
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-if [ $VADDR64 -ne 0 ]; then
-echo "-----------------------------"
-echo "running virtual_address_range"
-echo "-----------------------------"
-./virtual_address_range
-if [ $? -ne 0 ]; then
-       echo "[FAIL]"
-       exitcode=1
-else
-       echo "[PASS]"
-fi
-
-echo "-----------------------------"
-echo "running virtual address 128TB switch test"
-echo "-----------------------------"
-./va_128TBswitch
-if [ $? -ne 0 ]; then
-    echo "[FAIL]"
-    exitcode=1
-else
-    echo "[PASS]"
-fi
-fi # VADDR64
-
-echo "------------------------------------"
-echo "running vmalloc stability smoke test"
-echo "------------------------------------"
-./test_vmalloc.sh smoke
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
-       echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
-        echo "[SKIP]"
-        exitcode=$ksft_skip
-else
-       echo "[FAIL]"
-       exitcode=1
-fi
-
-echo "------------------------------------"
-echo "running MREMAP_DONTUNMAP smoke test"
-echo "------------------------------------"
-./mremap_dontunmap
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
-       echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
-        echo "[SKIP]"
-        exitcode=$ksft_skip
-else
-       echo "[FAIL]"
-       exitcode=1
-fi
-
-echo "running HMM smoke test"
-echo "------------------------------------"
-./test_hmm.sh smoke
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
-       echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
-       echo "[SKIP]"
-       exitcode=$ksft_skip
-else
-       echo "[FAIL]"
-       exitcode=1
-fi
-
-exit $exitcode
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
new file mode 100755 (executable)
index 0000000..e953f3c
--- /dev/null
@@ -0,0 +1,349 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#please run as root
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+mnt=./huge
+exitcode=0
+
+#get huge pagesize and freepages from /proc/meminfo
+while read name size unit; do
+       if [ "$name" = "HugePages_Free:" ]; then
+               freepgs=$size
+       fi
+       if [ "$name" = "Hugepagesize:" ]; then
+               hpgsize_KB=$size
+       fi
+done < /proc/meminfo
+
+# Simple hugetlbfs tests have a hardcoded minimum requirement of
+# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
+# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
+# both of these requirements into account and attempt to increase
+# number of huge pages available.
+nr_cpus=$(nproc)
+hpgsize_MB=$((hpgsize_KB / 1024))
+half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
+needmem_KB=$((half_ufd_size_MB * 2 * 1024))
+
+#set proper nr_hugepages
+if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
+       nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
+       needpgs=$((needmem_KB / hpgsize_KB))
+       tries=2
+       while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
+               lackpgs=$(( $needpgs - $freepgs ))
+               echo 3 > /proc/sys/vm/drop_caches
+               echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
+               if [ $? -ne 0 ]; then
+                       echo "Please run this test as root"
+                       exit $ksft_skip
+               fi
+               while read name size unit; do
+                       if [ "$name" = "HugePages_Free:" ]; then
+                               freepgs=$size
+                       fi
+               done < /proc/meminfo
+               tries=$((tries - 1))
+       done
+       if [ $freepgs -lt $needpgs ]; then
+               printf "Not enough huge pages available (%d < %d)\n" \
+                      $freepgs $needpgs
+               exit 1
+       fi
+else
+       echo "no hugetlbfs support in kernel?"
+       exit 1
+fi
+
+#filter 64bit architectures
+ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
+if [ -z $ARCH ]; then
+  ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
+fi
+VADDR64=0
+echo "$ARCH64STR" | grep $ARCH && VADDR64=1
+
+mkdir $mnt
+mount -t hugetlbfs none $mnt
+
+echo "---------------------"
+echo "running hugepage-mmap"
+echo "---------------------"
+./hugepage-mmap
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+shmmax=`cat /proc/sys/kernel/shmmax`
+shmall=`cat /proc/sys/kernel/shmall`
+echo 268435456 > /proc/sys/kernel/shmmax
+echo 4194304 > /proc/sys/kernel/shmall
+echo "--------------------"
+echo "running hugepage-shm"
+echo "--------------------"
+./hugepage-shm
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+echo $shmmax > /proc/sys/kernel/shmmax
+echo $shmall > /proc/sys/kernel/shmall
+
+echo "-------------------"
+echo "running map_hugetlb"
+echo "-------------------"
+./map_hugetlb
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "NOTE: The above hugetlb tests provide minimal coverage.  Use"
+echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for"
+echo "      hugetlb regression testing."
+
+echo "---------------------------"
+echo "running map_fixed_noreplace"
+echo "---------------------------"
+./map_fixed_noreplace
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "------------------------------------------------------"
+echo "running: gup_test -u # get_user_pages_fast() benchmark"
+echo "------------------------------------------------------"
+./gup_test -u
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "------------------------------------------------------"
+echo "running: gup_test -a # pin_user_pages_fast() benchmark"
+echo "------------------------------------------------------"
+./gup_test -a
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "------------------------------------------------------------"
+echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
+echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
+echo "------------------------------------------------------------"
+./gup_test -ct -F 0x1 0 19 0x1000
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "-------------------"
+echo "running userfaultfd"
+echo "-------------------"
+./userfaultfd anon 20 16
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "---------------------------"
+echo "running userfaultfd_hugetlb"
+echo "---------------------------"
+# Test requires source and destination huge pages.  Size of source
+# (half_ufd_size_MB) is passed as argument to test.
+./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+rm -f $mnt/ufd_test_file
+
+echo "-------------------------"
+echo "running userfaultfd_shmem"
+echo "-------------------------"
+./userfaultfd shmem 20 16
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+#cleanup
+umount $mnt
+rm -rf $mnt
+echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
+
+echo "-----------------------"
+echo "running compaction_test"
+echo "-----------------------"
+./compaction_test
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "----------------------"
+echo "running on-fault-limit"
+echo "----------------------"
+sudo -u nobody ./on-fault-limit
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "--------------------"
+echo "running map_populate"
+echo "--------------------"
+./map_populate
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "-------------------------"
+echo "running mlock-random-test"
+echo "-------------------------"
+./mlock-random-test
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "--------------------"
+echo "running mlock2-tests"
+echo "--------------------"
+./mlock2-tests
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "-------------------"
+echo "running mremap_test"
+echo "-------------------"
+./mremap_test
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "-----------------"
+echo "running thuge-gen"
+echo "-----------------"
+./thuge-gen
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+if [ $VADDR64 -ne 0 ]; then
+echo "-----------------------------"
+echo "running virtual_address_range"
+echo "-----------------------------"
+./virtual_address_range
+if [ $? -ne 0 ]; then
+       echo "[FAIL]"
+       exitcode=1
+else
+       echo "[PASS]"
+fi
+
+echo "-----------------------------"
+echo "running virtual address 128TB switch test"
+echo "-----------------------------"
+./va_128TBswitch
+if [ $? -ne 0 ]; then
+    echo "[FAIL]"
+    exitcode=1
+else
+    echo "[PASS]"
+fi
+fi # VADDR64
+
+echo "------------------------------------"
+echo "running vmalloc stability smoke test"
+echo "------------------------------------"
+./test_vmalloc.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+       echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+        echo "[SKIP]"
+        exitcode=$ksft_skip
+else
+       echo "[FAIL]"
+       exitcode=1
+fi
+
+echo "------------------------------------"
+echo "running MREMAP_DONTUNMAP smoke test"
+echo "------------------------------------"
+./mremap_dontunmap
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+       echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+        echo "[SKIP]"
+        exitcode=$ksft_skip
+else
+       echo "[FAIL]"
+       exitcode=1
+fi
+
+echo "running HMM smoke test"
+echo "------------------------------------"
+./test_hmm.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+       echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+       echo "[SKIP]"
+       exitcode=$ksft_skip
+else
+       echo "[FAIL]"
+       exitcode=1
+fi
+
+exit $exitcode