]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
Merge tag 'trace-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Apr 2019 23:15:57 +0000 (13:15 -1000)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Apr 2019 23:15:57 +0000 (13:15 -1000)
Pull syscall-get-arguments cleanup and fixes from Steven Rostedt:
 "Andy Lutomirski approached me to tell me that the
  syscall_get_arguments() implementation in x86 was horrible and gcc
  certainly gets it wrong.

  He said that since the tracepoints only pass in 0 and 6 for i and n
  respectively, it should be optimized for that case. Inspecting the
  kernel, I discovered that all users pass in 0 for i and only one file
  passing in something other than 6 for the number of arguments. That
  code happens to be my own code used for the special syscall tracing.

  That can easily be converted to just using 0 and 6 as well, and only
  copying what is needed. Which is probably the faster path anyway for
  that case.

  Along the way, a couple of real fixes came from this as the
  syscall_get_arguments() function was incorrect for csky and riscv.

  x86 has been optimized for the new interface that removes the
  variable number of arguments, but the other architectures could still
  use some loving and take more advantage of the simpler interface"

* tag 'trace-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  syscalls: Remove start and number from syscall_set_arguments() args
  syscalls: Remove start and number from syscall_get_arguments() args
  csky: Fix syscall_get_arguments() and syscall_set_arguments()
  riscv: Fix syscall_get_arguments() and syscall_set_arguments()
  tracing/syscalls: Pass in hardcoded 6 into syscall_get_arguments()
  ptrace: Remove maxargs from task_current_syscall()

176 files changed:
.mailmap
Documentation/bpf/btf.rst
Documentation/devicetree/bindings/hwmon/adc128d818.txt
Documentation/networking/bpf_flow_dissector.rst [new file with mode: 0644]
Documentation/networking/index.rst
MAINTAINERS
arch/riscv/include/asm/fixmap.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/module.c
arch/riscv/kernel/setup.c
arch/riscv/mm/Makefile
arch/riscv/mm/init.c
drivers/acpi/acpica/evgpe.c
drivers/char/Kconfig
drivers/cpufreq/intel_pstate.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/hid-uclogic-params.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hwmon/Kconfig
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/occ/common.c
drivers/mfd/Kconfig
drivers/mfd/sprd-sc27xx-spi.c
drivers/mfd/twl-core.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/usb/qmi_wwan.c
drivers/net/vrf.c
fs/aio.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/smb2file.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/debugfs/inode.c
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/ubifs/super.c
include/linux/mii.h
include/linux/mlx5/driver.h
include/net/ip.h
include/net/net_namespace.h
include/net/netns/hash.h
include/net/sch_generic.h
kernel/bpf/cpumap.c
kernel/bpf/inode.c
kernel/bpf/verifier.c
kernel/signal.c
mm/compaction.c
net/8021q/vlan_dev.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/bridge/br_multicast.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/net_namespace.c
net/core/skbuff.c
net/dccp/feat.c
net/dsa/tag_qca.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_ipv4.c
net/ipv6/ila/ila_xlat.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/sit.c
net/kcm/kcmsock.c
net/openvswitch/flow_netlink.c
net/rds/tcp.c
net/sched/act_sample.c
net/sched/cls_matchall.c
net/sched/sch_cake.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_taprio.c
net/sched/sch_tbf.c
net/sctp/protocol.c
net/tipc/netlink_compat.c
net/tls/tls_sw.c
tools/lib/bpf/Makefile
tools/lib/bpf/btf.c
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/progs/bpf_flow.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/tc-testing/tc-tests/actions/sample.json

index b2cde8668dcc38f85c6f0f1bac9da5fa2e9f63b9..ae2bcad06f4b58eb3db18020df75cabeb59b6ed0 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
index 9a60a5d60e380ab2204334086a58ebef1f3b11e5..7313d354f20e6402e23161f24ddab227afb7c0a1 100644 (file)
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
index 08bab0e94d25a21b8ed4d87738641e5ffe74bfa2..d0ae46d7bac370d9db369d9e471ef632be2482d9 100644 (file)
@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
- - ti,mode:     Operation mode (see above).
+ - ti,mode:     Operation mode (u8) (see above).
 
 
 Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
        adc128d818@1d {
                compatible = "ti,adc128d818";
                reg = <0x1d>;
-               ti,mode = <2>;
+               ti,mode = /bits/ 8 <2>;
        };
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644 (file)
index 0000000..b375ae2
--- /dev/null
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+BPF Flow Dissector
+==================
+
+Overview
+========
+
+Flow dissector is a routine that parses metadata out of the packets. It's
+used in the various places in the networking subsystem (RFS, flow hash, etc).
+
+BPF flow dissector is an attempt to reimplement C-based flow dissector logic
+in BPF to gain all the benefits of BPF verifier (namely, limits on the
+number of instructions and tail calls).
+
+API
+===
+
+BPF flow dissector programs operate on an ``__sk_buff``. However, only the
+limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
+``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
+and output arguments.
+
+The inputs are:
+  * ``nhoff`` - initial offset of the networking header
+  * ``thoff`` - initial offset of the transport header, initialized to nhoff
+  * ``n_proto`` - L3 protocol type, parsed out of L2 header
+
+Flow dissector BPF program should fill out the rest of the ``struct
+bpf_flow_keys`` fields. Input arguments ``nhoff/thoff/n_proto`` should be
+also adjusted accordingly.
+
+The return code of the BPF program is either BPF_OK to indicate successful
+dissection, or BPF_DROP to indicate parsing error.
+
+__sk_buff->data
+===============
+
+In the VLAN-less case, this is what the initial state of the BPF flow
+dissector looks like::
+
+  +------+------+------------+-----------+
+  | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+  +------+------+------------+-----------+
+                              ^
+                              |
+                              +-- flow dissector starts here
+
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In case of VLAN, flow dissector can be called with the two different states.
+
+Pre-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                        ^
+                        |
+                        +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point the to first byte of TCI
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = TPID
+
+Please note that TPID can be 802.1AD and, hence, BPF program would
+have to parse VLAN information twice for double tagged packets.
+
+
+Post-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                                          ^
+                                          |
+                                          +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point the to first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In this case VLAN information has been processed before the flow dissector
+and BPF flow dissector is not required to handle it.
+
+
+The takeaway here is as follows: BPF flow dissector program can be called with
+the optional VLAN header and should gracefully handle both cases: when single
+or double VLAN is present and when it is not present. The same program
+can be called for both cases and would have to be written carefully to
+handle both cases.
+
+
+Reference Implementation
+========================
+
+See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
+implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
+for the loader. bpftool can be used to load BPF flow dissector program as well.
+
+The reference implementation is organized as follows:
+  * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
+  * ``_dissect`` routine - entry point; it does input ``n_proto`` parsing and
+    does ``bpf_tail_call`` to the appropriate L3 handler
+
+Since BPF at this point doesn't support looping (or any jumping back),
+jmp_table is used instead to handle multiple levels of encapsulation (and
+IPv6 options).
+
+
+Current Limitations
+===================
+BPF flow dissector doesn't support exporting all the metadata that in-kernel
+C-based implementation can export. Notable example is single VLAN (802.1Q)
+and double VLAN (802.1AD) tags. Please refer to the ``struct bpf_flow_keys``
+for a set of information that's currently can be exported from the BPF context.
index 5449149be496fa8448fa5b74bafe2c5c796cb06d..984e68f9e0269507132846517a4c4c2b8d726216 100644 (file)
@@ -9,6 +9,7 @@ Contents:
    netdev-FAQ
    af_xdp
    batman-adv
+   bpf_flow_dissector
    can
    can_ucan_protocol
    device_drivers/freescale/dpaa2/index
index 43b36dbed48e77bffcc94ea79b8a2e6d79ad0aae..6771bd784f5f5f0567b51f3561023b7b504f0dcd 100644 (file)
@@ -4129,7 +4129,7 @@ F:        drivers/cpuidle/*
 F:     include/linux/cpuidle.h
 
 CRAMFS FILESYSTEM
-M:     Nicolas Pitre <nico@linaro.org>
+M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
 F:     Documentation/filesystems/cramfs.txt
 F:     fs/cramfs/
@@ -5833,7 +5833,7 @@ L:        netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-bus-mdio
 F:     Documentation/devicetree/bindings/net/mdio*
-F:     Documentation/networking/phy.txt
+F:     Documentation/networking/phy.rst
 F:     drivers/net/phy/
 F:     drivers/of/of_mdio.c
 F:     drivers/of/of_net.c
@@ -13981,7 +13981,7 @@ F:      drivers/media/rc/serial_ir.c
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M:     Edward Cree <ecree@solarflare.com>
-M:     Bert Kenward <bkenward@solarflare.com>
+M:     Martin Habets <mhabets@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
index 57afe604b495bef44894b5088517c103376684d4..c207f6634b91c4ecc8f60b759c82056dd5624ed4 100644 (file)
@@ -26,7 +26,7 @@ enum fixed_addresses {
 };
 
 #define FIXADDR_SIZE           (__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP            (PAGE_OFFSET)
+#define FIXADDR_TOP            (VMALLOC_START)
 #define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
 
 #define FIXMAP_PAGE_IO         PAGE_KERNEL
index a00168b980d2e6ca265ae0424045508275fbbe3f..fb53a8089e769473434493d59bc408079dcbb519 100644 (file)
@@ -300,7 +300,7 @@ do {                                                                \
                "       .balign 4\n"                            \
                "4:\n"                                          \
                "       li %0, %6\n"                            \
-               "       jump 2b, %1\n"                          \
+               "       jump 3b, %1\n"                          \
                "       .previous\n"                            \
                "       .section __ex_table,\"a\"\n"            \
                "       .balign " RISCV_SZPTR "\n"                      \
index f13f7f276639d504679034a36c53edc15f25dfe1..598568168d3511406fea38b23360c7e28a50a41f 100644 (file)
@@ -4,7 +4,6 @@
 
 ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_setup.o = -pg
 endif
 
 extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
 obj-y  += cacheinfo.o
 obj-y  += vdso/
 
-CFLAGS_setup.o := -mcmodel=medany
-
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_SMP)              += smpboot.o
 obj-$(CONFIG_SMP)              += smp.o
index 7dd308129b40f1862ab04dc1e12c790bf7c111fe..2872edce894d1e0b79d58a4ed735649dd8261408 100644 (file)
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
 {
        s32 hi20;
 
-       if (IS_ENABLED(CMODEL_MEDLOW)) {
+       if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                  me->name, (long long)v, location);
index ecb654f6a79ef105931a51950d520c1af845edff..540a331d1376922c62ba17bf0d9c786714d89948 100644 (file)
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
 };
 #endif
 
-unsigned long va_pa_offset;
-EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
index eb22ab49b3e008ec4ab677778302d5dbbea358b1..b68aac7018031cd5afe4ebb293051cbcc814969e 100644 (file)
@@ -1,3 +1,9 @@
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = -pg
+endif
+
 obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
index b379a75ac6a6778052b9161612357ba5df620648..5fd8c922e1c225f504c5c81349b49492f8489d19 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+                                                       __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
 static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -143,6 +147,11 @@ void __init setup_bootmem(void)
        }
 }
 
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
@@ -172,6 +181,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        }
 }
 
+/*
+ * setup_vm() is called from head.S with MMU-off.
+ *
+ * Following requirements should be honoured for setup_vm() to work
+ * correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ *    so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should "
+       "not use absolute addressing."
+#endif
+
 asmlinkage void __init setup_vm(void)
 {
        extern char _start;
index 62d3aa74277b4d03cb4bd1e7d5cee705864bd41b..5e9d7348c16f784f93ea117d537dbfbfe454a783 100644 (file)
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-       /* Enable the requested GPE */
+       /* Clear the GPE status */
+       status = acpi_hw_clear_gpe(gpe_event_info);
+       if (ACPI_FAILURE(status))
+               return_ACPI_STATUS(status);
 
+       /* Enable the requested GPE */
        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
        return_ACPI_STATUS(status);
 }
index 72866a004f075b79257c9d2df0c7b5b60852c31e..466ebd84ad1774096ecc45dd9f4ebe13ac785602 100644 (file)
@@ -348,7 +348,7 @@ config XILINX_HWICAP
 
 config R3964
        tristate "Siemens R3964 line discipline"
-       depends on TTY
+       depends on TTY && BROKEN
        ---help---
          This driver allows synchronous communication with devices using the
          Siemens R3964 packet protocol. Unless you are dealing with special
index b599c7318aab4ea9302779b8527ece4a91151914..2986119dd31fb8391e3256a88942272870acad44 100644 (file)
@@ -2596,6 +2596,9 @@ static int __init intel_pstate_init(void)
        const struct x86_cpu_id *id;
        int rc;
 
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return -ENODEV;
+
        if (no_load)
                return -ENODEV;
 
@@ -2611,7 +2614,7 @@ static int __init intel_pstate_init(void)
        } else {
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
-                       pr_info("CPU ID not supported\n");
+                       pr_info("CPU model not supported\n");
                        return -ENODEV;
                }
 
index 4f8fb4ecde3419fe8449ddfcea859f17242e6919..ac0d646a7b743d96d8817d7d60b4ff63b8ad581f 100644 (file)
@@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        struct pci_dev *pdev = adev->pdev;
        enum pci_bus_speed cur_speed;
        enum pcie_link_width cur_width;
+       u32 ret = 1;
 
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        while (pdev) {
                cur_speed = pcie_get_speed_cap(pdev);
                cur_width = pcie_get_width_cap(pdev);
+               ret = pcie_bandwidth_available(adev->pdev, NULL,
+                                                      NULL, &cur_width);
+               if (!ret)
+                       cur_width = PCIE_LNK_WIDTH_RESRV;
 
                if (cur_speed != PCI_SPEED_UNKNOWN) {
                        if (*speed == PCI_SPEED_UNKNOWN)
index d0309e8c9d12cdafa95d2a23e84018f4bb6b8035..a11db2b1a63f41e16acd4df34a24b2f3e6db9140 100644 (file)
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-       adev->gfx.rlc.funcs->reset(adev);
-
        gfx_v9_0_init_pg(adev);
 
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
index 4eba3c4800b63bef00ec9fd532919aa84ca72126..ea18e9c2d8cea5c65582274a297d67b1d0fbb82d 100644 (file)
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
        struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
 
        core_dc->hwss.blank_stream(pipe_ctx);
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               dal_ddc_service_write_scdc_data(
+                       stream->link->ddc, 0,
+                       stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
        core_dc->hwss.disable_stream(pipe_ctx, option);
 
        disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
index 9aa7bec1b5fe6f3aeb67da66d4b88b2e16966bbb..23b5b94a4939ac809c40448f1aa33e5d1500f93e 100644 (file)
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
         *   MP0CLK DS
         */
        data->registry_data.disallowed_features = 0xE0041C00;
+       /* ECC feature should be disabled on old SMUs */
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+       hwmgr->smu_version = smum_get_argument(hwmgr);
+       if (hwmgr->smu_version < 0x282100)
+               data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
        data->registry_data.od_state_in_dc_support = 0;
        data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
        data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
        data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+       data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
 
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
                data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                                "FCLK_DS",
                                "MP1CLK_DS",
                                "MP0CLK_DS",
-                               "XGMI"};
+                               "XGMI",
+                               "ECC"};
        static const char *output_title[] = {
                                "FEATURES",
                                "BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        struct vega20_single_dpm_table *dpm_table;
        bool vblank_too_short = false;
        bool disable_mclk_switching;
+       bool disable_fclk_switching;
        uint32_t i, latency;
 
        disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        if (hwmgr->display_config->nb_pstate_switch_disable)
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
+       if ((disable_mclk_switching &&
+           (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+            hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+               disable_fclk_switching = true;
+       else
+               disable_fclk_switching = false;
+
        /* fclk */
        dpm_table = &(data->dpm_table.fclk_table);
        dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
        dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
-       if (hwmgr->display_config->nb_pstate_switch_disable)
+       if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
                dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
        /* vclk */
index a5bc758ae09728327bd1230dbbf9969460eba263..ac2a3118a0ae779224be91fd750dda2319027a64 100644 (file)
@@ -80,6 +80,7 @@ enum {
        GNLD_DS_MP1CLK,
        GNLD_DS_MP0CLK,
        GNLD_XGMI,
+       GNLD_ECC,
 
        GNLD_FEATURES_MAX
 };
index 63d5cf69154967b90aa696de2ae5c1d407bd579f..195c4ae67058554d1d2bf194cedf9762f79a96e3 100644 (file)
@@ -99,7 +99,7 @@
 #define FEATURE_DS_MP1CLK_BIT           30
 #define FEATURE_DS_MP0CLK_BIT           31
 #define FEATURE_XGMI_BIT                32
-#define FEATURE_SPARE_33_BIT            33
+#define FEATURE_ECC_BIT                 33
 #define FEATURE_SPARE_34_BIT            34
 #define FEATURE_SPARE_35_BIT            35
 #define FEATURE_SPARE_36_BIT            36
 #define FEATURE_DS_FCLK_MASK            (1 << FEATURE_DS_FCLK_BIT            )
 #define FEATURE_DS_MP1CLK_MASK          (1 << FEATURE_DS_MP1CLK_BIT          )
 #define FEATURE_DS_MP0CLK_MASK          (1 << FEATURE_DS_MP0CLK_BIT          )
-#define FEATURE_XGMI_MASK               (1 << FEATURE_XGMI_BIT               )
+#define FEATURE_XGMI_MASK               (1ULL << FEATURE_XGMI_BIT               )
+#define FEATURE_ECC_MASK                (1ULL << FEATURE_ECC_BIT                )
 
 #define DPM_OVERRIDE_DISABLE_SOCCLK_PID             0x00000001
 #define DPM_OVERRIDE_DISABLE_UCLK_PID               0x00000002
index 035479e273beca866575c4bef70438583029d2df..e3f9caa7839f7347e1eaa25a798c6574446e813f 100644 (file)
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
  *
index 3e7e2b80c8579017cecdda478bc6166e1f46e061..5d887f7cc0d5c91aebdcdb11e01569c86b891d27 100644 (file)
@@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
-
-               info->size = (((p.stride * p.height * p.bpp) / 8) +
-                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
@@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
-
-               info->size = (((info->stride * c.height * c.bpp) / 8)
-                               + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }
 
+       info->size = (info->stride * info->height + PAGE_SIZE - 1)
+                     >> PAGE_SHIFT;
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
index d7052ab7908c8d9c7872df64cb5cd68ec8f13b4e..cf133ef038735d64e6655b78c087992f5f1d1984 100644 (file)
@@ -1946,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-       atomic_dec(&mm->pincount);
+       atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
index 159192c097cc7eb7424070e8cec052f3f5e5b1f7..05b953793316b28ac1fb19c902474e468ba828b0 100644 (file)
@@ -1486,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                intel_runtime_pm_put_unchecked(dev_priv);
        }
 
-       if (ret && (vgpu_is_vm_unhealthy(ret))) {
-               enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+       if (ret) {
+               if (vgpu_is_vm_unhealthy(ret))
+                       enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }
index 0bd890c04fe4f7c911bd9bde1a79af11af08ff5c..f6f6e5b78e9784c0ffee5f7132a8ddd2a9339954 100644 (file)
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                                       &ctx);
                if (ret) {
-                       ret = -EINTR;
+                       if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+                               try_again = true;
+                               continue;
+                       }
                        break;
                }
                crtc = connector->state->crtc;
index 6ca8d322b487279348d90513caa10d6bb6745e40..4ca0cdfa6b33af35f951a3969cf9c02b67d7a1b0 100644 (file)
@@ -150,6 +150,7 @@ config HID_ASUS
        tristate "Asus"
        depends on LEDS_CLASS
        depends on ASUS_WMI || ASUS_WMI=n
+       select POWER_SUPPLY
        ---help---
        Support for Asus notebook built-in keyboard and touchpad via i2c, and
        the Asus Republic of Gamers laptop keyboard special keys.
index 9993b692598fb84d1700e26ef7f97856ff842955..860e21ec6a492a35392f2b47146f5e0a811c1068 100644 (file)
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
                        unsigned offset, unsigned n)
 {
-       if (n > 32) {
-               hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+       if (n > 256) {
+               hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
                         n, current->comm);
-               n = 32;
+               n = 256;
        }
 
        return __extract(report, offset, n);
index ac9fda1b5a7233c227cd5517dcf6331a29483a60..1384e57182af978e4329c9e946e3487f85229be5 100644 (file)
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
        seq_printf(f, "\n\n");
 
        /* dump parsed data and input mappings */
+       if (down_interruptible(&hdev->driver_input_lock))
+               return 0;
+
        hid_dump_device(hdev, f);
        seq_printf(f, "\n");
        hid_dump_input_mapping(hdev, f);
 
+       up(&hdev->driver_input_lock);
+
        return 0;
 }
 
index b6d93f4ad037e440d1e5d23d76058e4be606159c..adce58f24f7638a70c170f17a694b5baa7f5a49a 100644 (file)
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E   0x7e7e
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
index b10b1922c5bdf304a4f32100365da00f0f1572f4..1fce0076e7dc470e94cf561bf8c55b78cd92c6f2 100644 (file)
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x1b8: map_key_clear(KEY_VIDEO);           break;
                case 0x1bc: map_key_clear(KEY_MESSENGER);       break;
                case 0x1bd: map_key_clear(KEY_INFO);            break;
+               case 0x1cb: map_key_clear(KEY_ASSISTANT);       break;
                case 0x201: map_key_clear(KEY_NEW);             break;
                case 0x202: map_key_clear(KEY_OPEN);            break;
                case 0x203: map_key_clear(KEY_CLOSE);           break;
index 15ed6177a7a364d6b2634babe0df1be83b4cec7b..199cc256e9d9d3903016f64f66b36b909a9c1109 100644 (file)
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                kfree(data);
                return -ENOMEM;
        }
+       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+       if (!data->wq) {
+               kfree(data->effect_ids);
+               kfree(data);
+               return -ENOMEM;
+       }
+
        data->hidpp = hidpp;
        data->feature_index = feature_index;
        data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        /* ignore boost value at response.fap.params[2] */
 
        /* init the hardware command queue */
-       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
        atomic_set(&data->workqueue_size, 0);
 
        /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                input_report_rel(mydata->input, REL_Y, v);
 
                v = hid_snto32(data[6], 8);
-               hidpp_scroll_counter_handle_scroll(
-                               &hidpp->vertical_wheel_counter, v);
+               if (v != 0)
+                       hidpp_scroll_counter_handle_scroll(
+                                       &hidpp->vertical_wheel_counter, v);
 
                input_sync(mydata->input);
        }
index 953908f2267c0653478cf88d53c7e85fdb121d76..77ffba48cc737e0df69892cfcceaafacb9815534 100644 (file)
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
-       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { }
 };
 
-/**
+/*
  * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
  *
  * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
                if (hdev->product == 0x0401 &&
                    strncmp(hdev->name, "ELAN0800", 8) != 0)
                        return true;
+               /* Same with product id 0x0400 */
+               if (hdev->product == 0x0400 &&
+                   strncmp(hdev->name, "QTEC0001", 8) != 0)
+                       return true;
                break;
        }
 
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
        }
 
        if (bl_entry != NULL)
-               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        bl_entry->driver_data, bl_entry->vendor,
                        bl_entry->product);
 
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
                quirks |= bl_entry->driver_data;
 
        if (quirks)
-               dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        quirks, hdev->vendor, hdev->product);
        return quirks;
 }
index 8141cadfca0e3c3ce62eccff1c28cd94031827b1..8dae0f9b819e011d6695462fea7e88e85cd16669 100644 (file)
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
 static int steam_register(struct steam_device *steam)
 {
        int ret;
+       bool client_opened;
 
        /*
         * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
                 * Unlikely, but getting the serial could fail, and it is not so
                 * important, so make up a serial number and go on.
                 */
+               mutex_lock(&steam->mutex);
                if (steam_get_serial(steam) < 0)
                        strlcpy(steam->serial_no, "XXXXXXXXXX",
                                        sizeof(steam->serial_no));
+               mutex_unlock(&steam->mutex);
 
                hid_info(steam->hdev, "Steam Controller '%s' connected",
                                steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
        }
 
        mutex_lock(&steam->mutex);
-       if (!steam->client_opened) {
+       client_opened = steam->client_opened;
+       if (!client_opened)
                steam_set_lizard_mode(steam, lizard_mode);
+       mutex_unlock(&steam->mutex);
+
+       if (!client_opened)
                ret = steam_input_register(steam);
-       } else {
+       else
                ret = 0;
-       }
-       mutex_unlock(&steam->mutex);
 
        return ret;
 }
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
 {
        struct steam_device *steam = hdev->driver_data;
 
+       unsigned long flags;
+       bool connected;
+
+       spin_lock_irqsave(&steam->lock, flags);
+       connected = steam->connected;
+       spin_unlock_irqrestore(&steam->lock, flags);
+
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
+       if (connected)
+               steam_set_lizard_mode(steam, lizard_mode);
        mutex_unlock(&steam->mutex);
 
-       if (steam->connected) {
-               steam_set_lizard_mode(steam, lizard_mode);
+       if (connected)
                steam_input_register(steam);
-       }
 }
 
 static int steam_client_ll_raw_request(struct hid_device *hdev,
index 7710d9f957da5b0dd07ca1444416de7cecd10529..0187c9f8fc22c5567e934cc0cc2089877963c56e 100644 (file)
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
                goto cleanup;
        }
        rc = usb_string(udev, 201, ver_ptr, ver_len);
-       if (ver_ptr == NULL) {
-               rc = -ENOMEM;
-               goto cleanup;
-       }
        if (rc == -EPIPE) {
                *ver_ptr = '\0';
        } else if (rc < 0) {
index 90164fed08d35eca2c34250c8b7cb3814ea99f53..4d1f24ee249c4455a4d5dfe18c7e6b0541311ad4 100644 (file)
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_NO_RUNTIME_PM },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
+       { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
index 6f929bfa9fcd39380f7e9d9fc9729156e28e09f6..d0f1dfe2bcbbd652aa1daa682d2feac611dfa4da 100644 (file)
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
 config SENSORS_W83773G
        tristate "Nuvoton W83773G"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the Nuvoton W83773G hardware
          monitoring chip.
index e4f9f7ce92fabc7c5f10aebaf5d69d350da565d2..f9abeeeead9e966dd8c8df7d225b81788eb67e40 100644 (file)
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
 };
 
 static const u32 ntc_temp_config[] = {
-       HWMON_T_INPUT, HWMON_T_TYPE,
+       HWMON_T_INPUT | HWMON_T_TYPE,
        0
 };
 
index b91a80abf724d087e02cc7aa97e211a1380f6570..4679acb4918e7f65660a8e0e32a7c5f793f03ff0 100644 (file)
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                s++;
                        }
                }
+
+               s = (sensors->power.num_sensors * 4) + 1;
        } else {
                for (i = 0; i < sensors->power.num_sensors; ++i) {
                        s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                                     show_power, NULL, 3, i);
                        attr++;
                }
-       }
 
-       if (sensors->caps.num_sensors >= 1) {
                s = sensors->power.num_sensors + 1;
+       }
 
+       if (sensors->caps.num_sensors >= 1) {
                snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
                attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
                                             0, 0);
index 0ce2d8dfc5f1a19bedbae37f91ce956b1b13da89..26ad6468d13a786552f581e3f45eb444febf0b51 100644 (file)
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
 
 config MFD_SUN6I_PRCM
        bool "Allwinner A31 PRCM controller"
-       depends on ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
        select MFD_CORE
        help
          Support for the PRCM (Power/Reset/Clock Management) unit available
index 69df27769c2136e817d3baf575269c59902752ac..43ac71691fe477f95eba6293cc6f6b2df810e243 100644 (file)
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
 static const struct mfd_cell sprd_pmic_devs[] = {
        {
                .name = "sc27xx-wdt",
-               .of_compatible = "sprd,sc27xx-wdt",
+               .of_compatible = "sprd,sc2731-wdt",
        }, {
                .name = "sc27xx-rtc",
-               .of_compatible = "sprd,sc27xx-rtc",
+               .of_compatible = "sprd,sc2731-rtc",
        }, {
                .name = "sc27xx-charger",
-               .of_compatible = "sprd,sc27xx-charger",
+               .of_compatible = "sprd,sc2731-charger",
        }, {
                .name = "sc27xx-chg-timer",
-               .of_compatible = "sprd,sc27xx-chg-timer",
+               .of_compatible = "sprd,sc2731-chg-timer",
        }, {
                .name = "sc27xx-fast-chg",
-               .of_compatible = "sprd,sc27xx-fast-chg",
+               .of_compatible = "sprd,sc2731-fast-chg",
        }, {
                .name = "sc27xx-chg-wdt",
-               .of_compatible = "sprd,sc27xx-chg-wdt",
+               .of_compatible = "sprd,sc2731-chg-wdt",
        }, {
                .name = "sc27xx-typec",
-               .of_compatible = "sprd,sc27xx-typec",
+               .of_compatible = "sprd,sc2731-typec",
        }, {
                .name = "sc27xx-flash",
-               .of_compatible = "sprd,sc27xx-flash",
+               .of_compatible = "sprd,sc2731-flash",
        }, {
                .name = "sc27xx-eic",
-               .of_compatible = "sprd,sc27xx-eic",
+               .of_compatible = "sprd,sc2731-eic",
        }, {
                .name = "sc27xx-efuse",
-               .of_compatible = "sprd,sc27xx-efuse",
+               .of_compatible = "sprd,sc2731-efuse",
        }, {
                .name = "sc27xx-thermal",
-               .of_compatible = "sprd,sc27xx-thermal",
+               .of_compatible = "sprd,sc2731-thermal",
        }, {
                .name = "sc27xx-adc",
-               .of_compatible = "sprd,sc27xx-adc",
+               .of_compatible = "sprd,sc2731-adc",
        }, {
                .name = "sc27xx-audio-codec",
-               .of_compatible = "sprd,sc27xx-audio-codec",
+               .of_compatible = "sprd,sc2731-audio-codec",
        }, {
                .name = "sc27xx-regulator",
-               .of_compatible = "sprd,sc27xx-regulator",
+               .of_compatible = "sprd,sc2731-regulator",
        }, {
                .name = "sc27xx-vibrator",
-               .of_compatible = "sprd,sc27xx-vibrator",
+               .of_compatible = "sprd,sc2731-vibrator",
        }, {
                .name = "sc27xx-keypad-led",
-               .of_compatible = "sprd,sc27xx-keypad-led",
+               .of_compatible = "sprd,sc2731-keypad-led",
        }, {
                .name = "sc27xx-bltc",
-               .of_compatible = "sprd,sc27xx-bltc",
+               .of_compatible = "sprd,sc2731-bltc",
        }, {
                .name = "sc27xx-fgu",
-               .of_compatible = "sprd,sc27xx-fgu",
+               .of_compatible = "sprd,sc2731-fgu",
        }, {
                .name = "sc27xx-7sreset",
-               .of_compatible = "sprd,sc27xx-7sreset",
+               .of_compatible = "sprd,sc2731-7sreset",
        }, {
                .name = "sc27xx-poweroff",
-               .of_compatible = "sprd,sc27xx-poweroff",
+               .of_compatible = "sprd,sc2731-poweroff",
        }, {
                .name = "sc27xx-syscon",
-               .of_compatible = "sprd,sc27xx-syscon",
+               .of_compatible = "sprd,sc2731-syscon",
        },
 };
 
index 299016bc46d909b4164708044ae21c9d5d5d14f4..104477b512a296b56e7549f001b63cd1ad9a43ac 100644 (file)
@@ -1245,6 +1245,28 @@ free:
        return status;
 }
 
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               disable_irq(client->irq);
+
+       return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               enable_irq(client->irq);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
 static const struct i2c_device_id twl_ids[] = {
        { "twl4030", TWL4030_VAUX2 },   /* "Triton 2" */
        { "twl5030", 0 },               /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
 /* One Client Driver , 4 Clients */
 static struct i2c_driver twl_driver = {
        .driver.name    = DRIVER_NAME,
+       .driver.pm      = &twl_dev_pm_ops,
        .id_table       = twl_ids,
        .probe          = twl_probe,
        .remove         = twl_remove,
index 2f120b2ffef0cfd7d97f6a901f9552fcc58288df..4985268e227330045e1cc0a6f3dadcb81e4c2830 100644 (file)
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-       return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+       return sprintf(buf, "%*phC\n",
+                      slave->dev->addr_len,
+                      slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
 
index dce84a2a65c71eeec36d10fa9ceb6df0a487866a..c44b2822e4dd064e2cba3a9ca1b0af457893e47e 100644 (file)
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                return 0;
 
        lane = mv88e6390x_serdes_get_lane(chip, port);
-       if (lane < 0)
+       if (lane < 0 && lane != -ENODEV)
                return lane;
 
-       if (chip->ports[port].serdes_irq) {
-               err = mv88e6390_serdes_irq_disable(chip, port, lane);
+       if (lane >= 0) {
+               if (chip->ports[port].serdes_irq) {
+                       err = mv88e6390_serdes_irq_disable(chip, port, lane);
+                       if (err)
+                               return err;
+               }
+
+               err = mv88e6390x_serdes_power(chip, port, false);
                if (err)
                        return err;
        }
 
-       err = mv88e6390x_serdes_power(chip, port, false);
-       if (err)
-               return err;
+       chip->ports[port].cmode = 0;
 
        if (cmode) {
                err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                if (err)
                        return err;
 
+               chip->ports[port].cmode = cmode;
+
+               lane = mv88e6390x_serdes_get_lane(chip, port);
+               if (lane < 0)
+                       return lane;
+
                err = mv88e6390x_serdes_power(chip, port, true);
                if (err)
                        return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                }
        }
 
-       chip->ports[port].cmode = cmode;
-
        return 0;
 }
 
index aa2be480719134f720e9487a3c71b4272cc8efe3..28eac9056211314504f7c1afdd1a4616bce44e68 100644 (file)
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};
 
-       cancel_delayed_work_sync(&nic->link_change_work);
-
        /* wait till all queued set_rx_mode tasks completes */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq) {
+               cancel_delayed_work_sync(&nic->link_change_work);
+               drain_workqueue(nic->nicvf_rx_mode_wq);
+       }
 
        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
 
        /* wait till all queued set_rx_mode tasks completes if any */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq)
+               drain_workqueue(nic->nicvf_rx_mode_wq);
 
        netif_carrier_off(netdev);
 
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
        /* Send VF config done msg to PF */
        nicvf_send_cfg_done(nic);
 
-       INIT_DELAYED_WORK(&nic->link_change_work,
-                         nicvf_link_status_check_task);
-       queue_delayed_work(nic->nicvf_rx_mode_wq,
-                          &nic->link_change_work, 0);
+       if (nic->nicvf_rx_mode_wq) {
+               INIT_DELAYED_WORK(&nic->link_change_work,
+                                 nicvf_link_status_check_task);
+               queue_delayed_work(nic->nicvf_rx_mode_wq,
+                                  &nic->link_change_work, 0);
+       }
 
        return 0;
 cleanup:
index 5b4d3badcb730b1417739d508bae8b3838afaaf9..e246f9733bb89161ceb2c6d39fcbe330b469ca61 100644 (file)
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
        /* Check if page can be recycled */
        if (page) {
                ref_count = page_ref_count(page);
-               /* Check if this page has been used once i.e 'put_page'
-                * called after packet transmission i.e internal ref_count
-                * and page's ref_count are equal i.e page can be recycled.
+               /* This page can be recycled if internal ref_count and page's
+                * ref_count are equal, indicating that the page has been used
+                * once for packet transmission. For non-XDP mode, internal
+                * ref_count is always '1'.
                 */
-               if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
-                       pgcache->ref_count--;
-               else
-                       page = NULL;
-
-               /* In non-XDP mode, page's ref_count needs to be '1' for it
-                * to be recycled.
-                */
-               if (!rbdr->is_xdp && (ref_count != 1))
+               if (rbdr->is_xdp) {
+                       if (ref_count == pgcache->ref_count)
+                               pgcache->ref_count--;
+                       else
+                               page = NULL;
+               } else if (ref_count != 1) {
                        page = NULL;
+               }
        }
 
        if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        while (head < rbdr->pgcnt) {
                pgcache = &rbdr->pgcache[head];
                if (pgcache->page && page_ref_count(pgcache->page) != 0) {
-                       if (!rbdr->is_xdp) {
-                               put_page(pgcache->page);
-                               continue;
+                       if (rbdr->is_xdp) {
+                               page_ref_sub(pgcache->page,
+                                            pgcache->ref_count - 1);
                        }
-                       page_ref_sub(pgcache->page, pgcache->ref_count - 1);
                        put_page(pgcache->page);
                }
                head++;
index 74849be5f004f59552892cf642a9b02efb393ac7..e2919005ead3e1592999b140841b0234344e87b7 100644 (file)
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
                ppmax = max;
 
        /* pool size must be multiple of unsigned long */
-       bmap = BITS_TO_LONGS(ppmax);
+       bmap = ppmax / BITS_PER_TYPE(unsigned long);
+       if (!bmap)
+               return NULL;
+
        ppmax = (bmap * sizeof(unsigned long)) << 3;
 
        alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
        if (reserve_factor) {
                ppmax_pool = ppmax / reserve_factor;
                pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+               if (!pool) {
+                       ppmax_pool = 0;
+                       reserve_factor = 0;
+               }
 
                pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
                         ndev->name, ppmax, ppmax_pool, pool_index_max);
index 79d03f8ee7b180d2cab9a2a647254461c0a0cb08..c7fa97a7e1f4d4b07dd6b00f7c5c7bffca4a0356 100644 (file)
@@ -150,7 +150,6 @@ out_buffer_fail:
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-       hnae_free_buffers(ring);
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+       if (is_rx_ring(ring))
+               hnae_free_buffers(ring);
+
        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
index 08a750fb60c49d397c61845130e153fe1e3b0b3e..d6fb8343723041992d12cfa505f222822bd603fd 100644 (file)
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
 };
 
 struct hnae_queue {
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        phys_addr_t phy_base;
        struct hnae_ae_dev *dev;        /* the device who use this queue */
        struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
index a97228c93831d69fe2211317486f14a25197e740..6c0507921623b843bb2010321782ace35a24b8a3 100644 (file)
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 static void hns_mac_param_get(struct mac_params *param,
                              struct hns_mac_cb *mac_cb)
 {
-       param->vaddr = (void *)mac_cb->vaddr;
+       param->vaddr = mac_cb->vaddr;
        param->mac_mode = hns_get_enet_interface(mac_cb);
        ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
        param->mac_id = mac_cb->mac_id;
index fbc75341bef760b82a1d7a10469d5649db91e366..22589799f1a575127f77f733e9e21a28889efa46 100644 (file)
@@ -187,7 +187,7 @@ struct mac_statistics {
 /*mac para struct ,mac get param from nic or dsaf when initialize*/
 struct mac_params {
        char addr[ETH_ALEN];
-       void *vaddr; /*virtual address*/
+       u8 __iomem *vaddr; /*virtual address*/
        struct device *dev;
        u8 mac_id;
        /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
        enum mac_mode mac_mode;
        u8 mac_id;
        struct hns_mac_cb *mac_cb;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
        unsigned int virt_dev_num;
        struct device *dev;
index ac55db065f167ad58f9ec41966afa7b0299b5f40..61eea6ac846fcfce9d254351221b998dd20a6e47 100644 (file)
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
                       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
                       DSAF_TBL_TCAM_KEY_PORT_S, port);
-
-       mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
 }
 
 /**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
        /* default config dvc to 0 */
        mac_data.tbl_ucast_dvc = 0;
        mac_data.tbl_ucast_out_port = mac_entry->port_num;
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
 
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                                     0xff,
                                     mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                dsaf_dev->ae_dev.name, mac_key.high.val,
                mac_key.low.val, entry_index);
 
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        /* config mc entry with mask */
        hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                /* config key mask */
                hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                soft_mac_entry += entry_index;
                soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
        } else { /* not zero, just del port, update */
-               tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-               tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+               tcam_data.tbl_tcam_data_high = mac_key.high.val;
+               tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
                hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
                                     &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
        return DSAF_DUMP_REGS_NUM;
 }
 
+static int hns_dsaf_get_port_id(u8 port)
+{
+       if (port < DSAF_SERVICE_NW_NUM)
+               return port;
+
+       if (port >= DSAF_BASE_INNER_PORT_NUM)
+               return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+       return -EINVAL;
+}
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
        struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
        memset(&temp_key, 0x0, sizeof(temp_key));
        mask_entry.addr[0] = 0x01;
        hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-                            port, mask_entry.addr);
+                            0xf, mask_entry.addr);
        tbl_tcam_mcast.tbl_mcast_item_vld = 1;
        tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-       if (port < DSAF_SERVICE_NW_NUM) {
-               mskid = port;
-       } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-               mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-       } else {
+       /* set MAC port to handle multicast */
+       mskid = hns_dsaf_get_port_id(port);
+       if (mskid == -EINVAL) {
                dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
                        dsaf_dev->ae_dev.name, port,
                        mask_key.high.val, mask_key.low.val);
                return;
        }
+       dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                    mskid % 32, 1);
 
+       /* set pool bit map to handle multicast */
+       mskid = hns_dsaf_get_port_id(port_num);
+       if (mskid == -EINVAL) {
+               dev_err(dsaf_dev->dev,
+                       "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, port_num,
+                       mask_key.high.val, mask_key.low.val);
+               return;
+       }
        dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
                     mskid % 32, 1);
+
        memcpy(&temp_key, &mask_key, sizeof(mask_key));
        hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
                                   (struct dsaf_tbl_tcam_data *)(&mask_key),
index 0e1cd99831a6083faa790aa80be1f6c635b15a50..76cc8887e1a83599c9178e88787ecb6c6a2b8784 100644 (file)
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
 #endif /* __HNS_DSAF_MAIN_H__ */
index 16294cd3c95459891c65080cd49c61f839afaa60..19b94879691f86e0ec969e40ed5a057cb24c3d7b 100644 (file)
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
                dsaf_set_field(origin, 1ull << 10, 10, en);
                dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
        } else {
-               u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+               u8 __iomem *base_addr = mac_cb->serdes_vaddr +
                                (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
                dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
        }
index 3d07c8a7639dad46c5b810b19f56ba84fdfc655a..17c019106e6e40d8f281deb87b1928367582e371 100644 (file)
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
        }
 }
 
-static void __iomem *
+static u8 __iomem *
 hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
        return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
        dsaf_dev->ppe_common[comm_index] = NULL;
 }
 
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
-                                       int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+                                     int ppe_idx)
 {
        return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
index f670e63a5a018cd5b48b4a62093c104905aa4463..110c6e8222c7038a6eb24c1854490845293e320d 100644 (file)
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
        struct hns_ppe_hw_stats hw_stats;
 
        u8 index;       /* index in a ppe common device */
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        int virq;
        u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
        u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
 struct ppe_common_cb {
        struct device *dev;
        struct dsaf_device *dsaf_dev;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
 
        enum ppe_common_mode ppe_mode;
 
index 6bf346c11b25a5c87bceca81abf1bfc83188d768..ac3518ca4d7bec5b3737cef78081202dfcf1f53d 100644 (file)
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
                mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
        } else {
                ring = &q->tx_ring;
-               ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+               ring->io_base = ring_pair_cb->q.io_base +
                        HNS_RCB_TX_REG_OFFSET;
                irq_idx = HNS_RCB_IRQ_IDX_TX;
                mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
        }
 }
 
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
        struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
index b9733b0b848263bc9a25ccf42f3d8b433a7b9e5e..b9e7f11f08968099c76a99c3f119e77a909c72ea 100644 (file)
 #define XGMAC_PAUSE_CTL_RSP_MODE_B     2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B      3
 
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
        writel(value, base + reg);
 }
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 #define dsaf_set_bit(origin, shift, val) \
        dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                      u32 shift, u32 val)
 {
        u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
 #define dsaf_get_bit(origin, shift) \
        dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                     u32 shift)
 {
        u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
        dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
 
 #define dsaf_write_b(addr, data)\
-       writeb((data), (__iomem unsigned char *)(addr))
+       writeb((data), (__iomem u8 *)(addr))
 #define dsaf_read_b(addr)\
-       readb((__iomem unsigned char *)(addr))
+       readb((__iomem u8 *)(addr))
 
 #define hns_mac_reg_read64(drv, offset) \
-       readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+       readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
 
 #endif /* _DSAF_REG_H */
index ba4316910dea1726da855c13b78e95bb6bd36a3c..a60f207768fc7152edbbf9a5394afaa080260d8b 100644 (file)
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
        dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
        dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
        dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-       dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+       dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }
 
 /**
index 60e7d7ae3787c280d9b21319a1493ccc5d570de8..4cd86ba1f050dcf786238c147cd2fc3b799797bc 100644 (file)
@@ -29,9 +29,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
        wmb(); /* commit all data before submit */
        assert(skb->queue_mapping < priv->ae_handle->q_num);
        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-       ring->stats.tx_pkts++;
-       ring->stats.tx_bytes += skb->len;
 
        return NETDEV_TX_OK;
 
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                /* issue prefetch for next Tx descriptor */
                prefetch(&ring->desc_cb[ring->next_to_clean]);
        }
+       /* update tx ring statistics. */
+       ring->stats.tx_pkts += pkts;
+       ring->stats.tx_bytes += bytes;
 
        NETIF_TX_UNLOCK(ring);
 
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_tx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_rx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
 
index fffe8c1c45d394b2a0723443582d6427ffa6e387..0fb61d440d3bb96a1d5b24b4c5380b3b0c33de4e 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o  hclge_debugfs.o
index fb93bbd358455a735880d6e83cd314c0773a636a..6193f8fa7cf34aa142f575ca903f42d666847f31 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
 hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
index baf5cc251f3299499f3fc03ee17513aa740110f6..8b8a7d00e8e0c92d23a9ca67683ce98a0417fddc 100644 (file)
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
 };
 
 struct hns_mdio_device {
-       void *vbase;            /* mdio reg base address */
+       u8 __iomem *vbase;              /* mdio reg base address */
        struct regmap *subctrl_vbase;
        struct hns_mdio_sc_reg sc_reg;
 };
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
 #define MDIO_SC_CLK_ST         0x531C
 #define MDIO_SC_RESET_ST       0x5A1C
 
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       writel_relaxed(value, reg_addr + reg);
+       writel_relaxed(value, base + reg);
 }
 
 #define MDIO_WRITE_REG(a, reg, value) \
        mdio_write_reg((a)->vbase, (reg), (value))
 
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       return readl_relaxed(reg_addr + reg);
+       return readl_relaxed(base + reg);
 }
 
 #define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
 
 #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
 
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
                               u32 val)
 {
        u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
        mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
 
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
 {
        u32 origin;
 
index 5ecbb1adcf3b9d45fa756af5682245933aa06eb3..51cfe95f3e247d9fd025bc474964289337b4c4d0 100644 (file)
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
         */
        adapter->state = VNIC_PROBED;
 
+       reinit_completion(&adapter->init_done);
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
 
-       init_completion(&adapter->init_done);
+       reinit_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 
        adapter->from_passive_init = false;
 
-       init_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       init_completion(&adapter->init_done);
        adapter->resetting = false;
 
        adapter->mac_change_pending = false;
index 5a0419421511fd7a9a3e44a1a7fa2c4e21f26217..ecef949f3baae022d46082f4a1dc32d18cb082ec 100644 (file)
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
        /* create driver workqueue */
        fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
                                          fm10k_driver_name);
+       if (!fm10k_workqueue)
+               return -ENOMEM;
 
        fm10k_dbg_init();
 
index d684998ba2b03b27230916afb7012951ad987a94..d3cc3427caad187ecf5bf967404f86182082790a 100644 (file)
@@ -790,6 +790,8 @@ struct i40e_vsi {
 
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
+
+       unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
        return !!vsi->xdp_prog;
 }
 
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
-       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
-       int qid = ring->queue_index;
-
-       if (ring_is_xdp(ring))
-               qid -= ring->vsi->alloc_queue_pairs;
-
-       if (!xdp_on)
-               return NULL;
-
-       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
index 4c885801fa2699432d0eaaaca8c4487081ee36b0..7874d0ec7fb0e1a5dc4c42aeed03d7342e8f4fef 100644 (file)
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
-       if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
-                         | (wol->wolopts != WAKE_FILTER))
+       if (wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;
 
        /* is this a new value? */
index da62218eb70ad3f4c95111c3c67f3a4dd541aa50..b1c265012c8ad03d65a6eb56e1a0d5e42371c6be 100644 (file)
@@ -3063,6 +3063,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
                            ring->queue_index);
 }
 
+/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+       int qid = ring->queue_index;
+
+       if (ring_is_xdp(ring))
+               qid -= ring->vsi->alloc_queue_pairs;
+
+       if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+               return NULL;
+
+       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
 /**
  * i40e_configure_tx_ring - Configure a transmit ring context and rest
  * @ring: The Tx ring to configure
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        hash_init(vsi->mac_filter_hash);
        vsi->irqs_ready = false;
 
+       if (type == I40E_VSI_MAIN) {
+               vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+               if (!vsi->af_xdp_zc_qps)
+                       goto err_rings;
+       }
+
        ret = i40e_set_num_rings_in_vsi(vsi);
        if (ret)
                goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        goto unlock_pf;
 
 err_rings:
+       bitmap_free(vsi->af_xdp_zc_qps);
        pf->next_vsi = i - 1;
        kfree(vsi);
 unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       bitmap_free(vsi->af_xdp_zc_qps);
        i40e_vsi_free_arrays(vsi, true);
        i40e_clear_rss_config_user(vsi);
 
index 5fb4353c742b9038d3ac7f867163d35cd3c3142d..31575c0bb884f2b0520b848d29420dc98438eba4 100644 (file)
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec64 now;
+       struct timespec64 now, then;
 
+       then = ns_to_timespec64(delta);
        mutex_lock(&pf->tmreg_lock);
 
        i40e_ptp_read(pf, &now, NULL);
-       timespec64_add_ns(&now, delta);
+       now = timespec64_add(now, then);
        i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        mutex_unlock(&pf->tmreg_lock);
index b5c182e688e351eed227f867a8eaebaa6349bb07..1b17486543ac7e078a8723512943bab1c50d6d46 100644 (file)
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
 
        if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                        return err;
        }
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        i40e_xsk_umem_dma_unmap(vsi, umem);
 
        if (if_running) {
index 01fcfc6f341519c1d8a67b5c8695ec9c04842b0c..d2e2c50ce257941491e4d0a96603808310357999 100644 (file)
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
index 69b230c53fed537464ba79b0be3ede75ab13f1da..3269d8e94744f61808893267e1e305122bedd3c5 100644 (file)
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-       int retval = 0;
-#endif
+       bool wake;
 
        rtnl_lock();
        netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        igb_clear_interrupt_scheme(adapter);
        rtnl_unlock();
 
-#ifdef CONFIG_PM
-       if (!runtime) {
-               retval = pci_save_state(pdev);
-               if (retval)
-                       return retval;
-       }
-#endif
-
        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                }
 
                ctrl = rd32(E1000_CTRL);
-               /* advertise wake from D3Cold */
-               #define E1000_CTRL_ADVD3WUC 0x00100000
-               /* phy power management enable */
-               #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);
 
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                wr32(E1000_WUFC, 0);
        }
 
-       *enable_wake = wufc || adapter->en_mng_pt;
-       if (!*enable_wake)
+       wake = wufc || adapter->en_mng_pt;
+       if (!wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);
 
+       if (enable_wake)
+               *enable_wake = wake;
+
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
         */
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
 
 static int __maybe_unused igb_suspend(struct device *dev)
 {
-       int retval;
-       bool wake;
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       retval = __igb_shutdown(pdev, &wake, 0);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
 static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
 
 static int __maybe_unused igb_runtime_suspend(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       int retval;
-       bool wake;
-
-       retval = __igb_shutdown(pdev, &wake, 1);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
index cc4907f9ff02c3faecba89c524674f33581d2346..2fb97967961c43893b2d06fef0df9bc476dda426 100644 (file)
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
        struct pci_dev *pdev = adapter->pdev;
        struct device *dev = &adapter->netdev->dev;
        struct mii_bus *bus;
+       int err = -ENODEV;
 
-       adapter->mii_bus = devm_mdiobus_alloc(dev);
-       if (!adapter->mii_bus)
+       bus = devm_mdiobus_alloc(dev);
+       if (!bus)
                return -ENOMEM;
 
-       bus = adapter->mii_bus;
-
        switch (hw->device_id) {
        /* C3000 SoCs */
        case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
         */
        hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
 
-       return mdiobus_register(bus);
+       err = mdiobus_register(bus);
+       if (!err) {
+               adapter->mii_bus = bus;
+               return 0;
+       }
 
 ixgbe_no_mii_bus:
        devm_mdiobus_free(dev, bus);
-       adapter->mii_bus = NULL;
-       return -ENODEV;
+       return err;
 }
 
 /**
index 122927f3a6005b2be70c694ead0a6d4a139069c6..d5e5afbdca6dcbacb776571c97eef74e033cf356 100644 (file)
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
        if (!eproto)
                return -EINVAL;
 
-       if (ext !=  MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
-               return -EOPNOTSUPP;
-
        err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
        if (err)
                return err;
index eac245a93f918c588dc8237e1af5996f3d0f73f0..4ab0d030b54486f67096a190471a747b1ac18c56 100644 (file)
@@ -122,7 +122,9 @@ out:
        return err;
 }
 
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+ * minimum speed value is 40Gbps
+ */
 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 {
        u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
        int err;
 
        err = mlx5e_port_linkspeed(priv->mdev, &speed);
-       if (err) {
-               mlx5_core_warn(priv->mdev, "cannot get port speed\n");
-               return 0;
-       }
+       if (err)
+               speed = SPEED_40000;
+       speed = max_t(u32, speed, SPEED_40000);
 
        xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int mtu)
+                                u32 xoff, unsigned int max_mtu)
 {
        int i;
 
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
                        return -ENOMEM;
 
                port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
-               port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
+               port_buffer->buffer[i].xon  =
+                       port_buffer->buffer[i].xoff - max_mtu;
        }
 
        return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 
 /**
  * update_buffer_lossy()
- *   mtu: device's MTU
+ *   max_mtu: netdev's max_mtu
  *   pfc_en: <input> current pfc configuration
  *   buffer: <input> current prio to buffer mapping
  *   xoff:   <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     Return 0 if no error.
  *     Set change to true if buffer configuration is modified.
  */
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
                               u8 pfc_en, u8 *buffer, u32 xoff,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
                if (err)
                        return err;
 
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
        return 0;
 }
 
+#define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 change, unsigned int mtu,
                                    struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        bool update_prio2buffer = false;
        u8 buffer[MLX5E_MAX_PRIORITY];
        bool update_buffer = false;
+       unsigned int max_mtu;
        u32 total_used = 0;
        u8 curr_pfc_en;
        int err;
        int i;
 
        mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+       max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
 
        err = mlx5e_port_query_buffer(priv, &port_buffer);
        if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
-                                         &port_buffer, &update_buffer);
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+                                         xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
        }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
index 3078491cc0d0678a6f1867373c752ed95496a59e..1539cf3de5dc97a180d7bdcb9fe2c5ec79db93c4 100644 (file)
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
        if (err)
                return err;
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 }
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir)
 {
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        mlx5_core_destroy_tir(mdev, tir->tirn);
        list_del(&tir->list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
        }
 
        INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+       mutex_init(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_tir *tir;
-       int err  = -ENOMEM;
+       int err  = 0;
        u32 tirn = 0;
        int inlen;
        void *in;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
+       if (!in) {
+               err = -ENOMEM;
                goto out;
+       }
 
        if (enable_uc_lb)
                MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
                tirn = tir->tirn;
                err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
        kvfree(in);
        if (err)
                netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return err;
 }
index a0987cc5fe4a12af0bf0155ad8f290153898518c..5efce4a3ff7965ab6899e8c2521a96c2e8f33043 100644 (file)
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
-                                   unsigned long *advertising_modes,
-                                   u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+                                   u32 eth_proto_cap, bool ext)
 {
        unsigned long proto_cap = eth_proto_cap;
        struct ptys2ethtool_config *table;
        u32 max_size;
        int proto;
 
-       mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+       table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+       max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+                        ARRAY_SIZE(ptys2legacy_ethtool_table);
+
        for_each_set_bit(proto, &proto_cap, max_size)
                bitmap_or(advertising_modes, advertising_modes,
                          table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
 }
 
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
-                           u8 tx_pause, u8 rx_pause,
-                           struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+                           struct ethtool_link_ksettings *link_ksettings,
+                           bool ext)
 {
        unsigned long *advertising = link_ksettings->link_modes.advertising;
-       ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+       ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
 
        if (rx_pause)
                ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
 
-       ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+       ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
 
 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        u8 an_disable_admin;
        u8 an_status;
        u8 connector_type;
+       bool admin_ext;
        bool ext;
        int err;
 
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_admin);
+       /* The fields eth_proto_admin and ext_eth_proto_admin are
+        * mutually exclusive. Hence try reading legacy advertising
+        * when extended advertising is zero.
+        * admin_ext indicates how eth_proto_admin should be
+        * interpreted.
+        */
+       admin_ext = ext;
+       if (ext && !eth_proto_admin) {
+               eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+                                                     eth_proto_admin);
+               admin_ext = false;
+       }
+
        eth_proto_oper   = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_oper);
        eth_proto_lp        = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
        get_supported(mdev, eth_proto_cap, link_ksettings);
-       get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+       get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+                       admin_ext);
        get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 
 #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
 
-       ext_requested = (link_ksettings->link_modes.advertising[0] >
-                       MLX5E_PTYS_EXT);
+       ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+                       MLX5E_PTYS_EXT ||
+                       link_ksettings->link_modes.advertising[1]);
        ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
-       /*when ptys_extended_ethernet is set legacy link modes are deprecated */
-       if (ext_requested != ext_supported)
-               return -EPROTONOSUPPORT;
+       ext_requested &= ext_supported;
 
        speed = link_ksettings->base.speed;
        ethtool2ptys_adver_func = ext_requested ?
                                  mlx5e_ethtool2ptys_ext_adver_link :
                                  mlx5e_ethtool2ptys_adver_link;
-       err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+       err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
        if (err) {
                netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
                           __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        if (!an_changes && link_modes == eproto.admin)
                goto out;
 
-       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
        mlx5_toggle_port_link(mdev);
 
 out:
index b4967a0ff8c7ba6ce51826e4ae61964a52bfebb5..d75dc44eb2ff63482820cd4d317af4577fcc1fbd 100644 (file)
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
        return true;
 }
 
+struct ip_ttl_word {
+       __u8    ttl;
+       __u8    protocol;
+       __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+       __be16  payload_len;
+       __u8    nexthdr;
+       __u8    hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+       u32 mask, offset;
+       u8 htype;
+
+       htype = act->mangle.htype;
+       offset = act->mangle.offset;
+       mask = ~act->mangle.mask;
+       /* For IPv4 & IPv6 headers, check the 4-byte word to
+        * determine whether the modified fields are anything
+        * other than ttl & hop_limit.
+        */
+       if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+               struct ip_ttl_word *ttl_word =
+                       (struct ip_ttl_word *)&mask;
+
+               if (offset != offsetof(struct iphdr, ttl) ||
+                   ttl_word->protocol ||
+                   ttl_word->check) {
+                       return true;
+               }
+       } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               struct ipv6_hoplimit_word *hoplimit_word =
+                       (struct ipv6_hoplimit_word *)&mask;
+
+               if (offset != offsetof(struct ipv6hdr, payload_len) ||
+                   hoplimit_word->payload_len ||
+                   hoplimit_word->nexthdr) {
+                       return true;
+               }
+       }
+       return false;
+}
+
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
                                          u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 {
        const struct flow_action_entry *act;
        bool modify_ip_header;
-       u8 htype, ip_proto;
        void *headers_v;
        u16 ethertype;
+       u8 ip_proto;
        int i;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                    act->id != FLOW_ACTION_ADD)
                        continue;
 
-               htype = act->mangle.htype;
-               if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
-                   htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               if (is_action_keys_supported(act)) {
                        modify_ip_header = true;
                        break;
                }
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
        return 0;
 }
 
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
-                                struct ip_tunnel_key *b)
+struct encap_key {
+       struct ip_tunnel_key *ip_tun_key;
+       int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+                                struct encap_key *b)
 {
-       return memcmp(a, b, sizeof(*a));
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+              a->tunnel_type != b->tunnel_type;
 }
 
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
 {
-       return jhash(key, sizeof(*key), 0);
+       return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+                    key->tunnel_type);
 }
 
 
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct ip_tunnel_info *tun_info;
-       struct ip_tunnel_key *key;
+       struct encap_key key, e_key;
        struct mlx5e_encap_entry *e;
        unsigned short family;
        uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        parse_attr = attr->parse_attr;
        tun_info = &parse_attr->tun_info[out_index];
        family = ip_tunnel_info_af(tun_info);
-       key = &tun_info->key;
+       key.ip_tun_key = &tun_info->key;
+       key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
 
-       hash_key = hash_encap_info(key);
+       hash_key = hash_encap_info(&key);
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
-               if (!cmp_encap_info(&e->tun_info.key, key)) {
+               e_key.ip_tun_key = &e->tun_info.key;
+               e_key.tunnel_type = e->tunnel_type;
+               if (!cmp_encap_info(&e_key, &key)) {
                        found = true;
                        break;
                }
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 
        if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
            hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
                                            parse_attr, hdrs, extack);
                if (err)
                        return err;
index ecd2c747f7260306fd972478ecce71610918e3b3..8a67fd197b7923f67af1872eae280e0f5e3eb663 100644 (file)
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
 
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(modify_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 {
        int err;
 
+       memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
        err = esw_create_legacy_vepa_table(esw);
        if (err)
                return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
        /* Star rule to forward all traffic to uplink vport */
        memset(spec, 0, sizeof(*spec));
+       memset(&dest, 0, sizeof(dest));
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = MLX5_VPORT_UPLINK;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
index f2260391be5b952478175de9be8235b3154c4ccf..9b2d78ee22b88333fcc0b043d4234c340dcb3c32 100644 (file)
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
 {
        int err;
 
+       memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
        err = esw_create_offloads_fdb_tables(esw, nvports);
index 5cf5f2a9d51fec724f4fac709e29e40f4110d5f7..8de64e88c670ccb850d5a84ddfb653a1ef7c300d 100644 (file)
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
        void *cmd;
        int ret;
 
+       rcu_read_lock();
+       flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+       rcu_read_unlock();
+
+       if (!flow) {
+               WARN_ONCE(1, "Received NULL pointer for handle\n");
+               return -EINVAL;
+       }
+
        buf = kzalloc(size, GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;
 
        cmd = (buf + 1);
 
-       rcu_read_lock();
-       flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-       rcu_read_unlock();
        mlx5_fpga_tls_flow_to_cmd(flow, cmd);
 
        MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
        buf->complete = mlx_tls_kfree_complete;
 
        ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+       if (ret < 0)
+               kfree(buf);
 
        return ret;
 }
index 70cc906a102b2dde87d161385126f43da4948266..76716419370df9738384675c3dc36e5d0ff6cf27 100644 (file)
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
                        .size   = 8,
                        .limit  = 4
                },
-               .mr_cache[16]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[17]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[18]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[19]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
-               .mr_cache[20]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
        },
 };
 
index eeda4ed98333afbf5671c1fd6d4ad69bc443a9e4..e336f6ee94f5c6d3a9108b85a6fb0529ecb7031d 100644 (file)
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
 
        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
-               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
-               NFP_FL_PUSH_VLAN_CFI;
+               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
index 4fcaf11ed56ed6f2ee89dde5313962676fba9f92..0ed51e79db00ebb90f0f9e8b2d9e74631c7ed4dc 100644 (file)
@@ -26,7 +26,7 @@
 #define NFP_FLOWER_LAYER2_GENEVE_OP    BIT(6)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO      GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI       BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT   BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID       GENMASK(11, 0)
 
 #define NFP_FLOWER_MASK_MPLS_LB                GENMASK(31, 12)
@@ -82,7 +82,6 @@
 #define NFP_FL_OUT_FLAGS_TYPE_IDX      GENMASK(2, 0)
 
 #define NFP_FL_PUSH_VLAN_PRIO          GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI           BIT(12)
 #define NFP_FL_PUSH_VLAN_VID           GENMASK(11, 0)
 
 #define IPV6_FLOW_LABEL_MASK           cpu_to_be32(0x000fffff)
index e03c8ef2c28c525b7fb44b4ce8f3e294d3d4fef6..9b8b843d0340374a6372501e2c69eb298563be75 100644 (file)
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
 
                flow_rule_match_vlan(rule, &match);
                /* Populate the tci field. */
-               if (match.key->vlan_id || match.key->vlan_priority) {
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.key->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.key->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       ext->tci = cpu_to_be16(tmp_tci);
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.mask->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.mask->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       msk->tci = cpu_to_be16(tmp_tci);
-               }
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.key->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.key->vlan_id);
+               ext->tci = cpu_to_be16(tmp_tci);
+
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.mask->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.mask->vlan_id);
+               msk->tci = cpu_to_be16(tmp_tci);
        }
 }
 
index d2c803bb4e562dd3805aaea86e689e5f682ce9ed..94d228c044963b8494c737aa1c46700d3598fd11 100644 (file)
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = dev_queue_xmit(skb);
        nfp_repr_inc_tx_stats(netdev, len, ret);
 
-       return ret;
+       return NETDEV_TX_OK;
 }
 
 static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
        netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
 
-       netdev->priv_flags |= IFF_NO_QUEUE;
+       netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
        netdev->features |= NETIF_F_LLTX;
 
        if (nfp_app_has_tc(app)) {
index 7562ccbbb39af59a2ba0e4078b2f43b2a7376809..19efa88f3f02d4636f6711f6c360977953aac2f5 100644 (file)
@@ -5460,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
        tp->cp_cmd |= PktCntrDisable | INTT_1;
        RTL_W16(tp, CPlusCmd, tp->cp_cmd);
 
-       RTL_W16(tp, IntrMitigate, 0x5151);
+       RTL_W16(tp, IntrMitigate, 0x5100);
 
        /* Work around for RxFIFO overflow. */
        if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
index 40d6356a7e73c213f0d1d073387b8605bb4f3726..3dfb07a78952533420da7cb1cb6b87b171d91661 100644 (file)
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+                                          int bfsize)
 {
-       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-                       << ERDES1_BUFFER2_SIZE_SHIFT)
-                  & ERDES1_BUFFER2_SIZE_MASK);
+       if (bfsize == BUF_SIZE_16KiB)
+               p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+                               << ERDES1_BUFFER2_SIZE_SHIFT)
+                          & ERDES1_BUFFER2_SIZE_MASK);
 
        if (end)
                p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-       p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-                               << RDES1_BUFFER2_SIZE_SHIFT)
-                   & RDES1_BUFFER2_SIZE_MASK);
+       if (bfsize >= BUF_SIZE_2KiB) {
+               int bfsize2;
+
+               bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+               p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+                           & RDES1_BUFFER2_SIZE_MASK);
+       }
 
        if (end)
                p->des1 |= cpu_to_le32(RDES1_END_RING);
index 7fbb6a4dbf5107723f16b825e7a3b1577c97254a..e061e9f5fad71f065440605646da812d1b9daf51 100644 (file)
@@ -296,7 +296,7 @@ exit:
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
        dwmac4_set_rx_owner(p, disable_rx_ic);
 }
index 1d858fdec99718ec63a5fa1064fe9dd99670d2e0..98fa471da7c0f2764729f98c7044f52e068c1db9 100644 (file)
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
        dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
index 5ef91a790f9d16fbd122f71e130cf7ecf5249a68..5202d6ad79194b0ed9134a7905d0aaa4309c6822 100644 (file)
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;
 
+       if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+               stats->rx_length_errors++;
+               return discard_frame;
+       }
+
        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                        x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
         * It doesn't match with the information reported into the databook.
         * At any rate, we need to understand if the CSUM hw computation is ok
         * and report this info to the upper layers. */
-       ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-                                !!(rdes0 & RDES0_FRAME_TYPE),
-                                !!(rdes0 & ERDES0_RX_MAC_ADDR));
+       if (likely(ret == good_frame))
+               ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+                                        !!(rdes0 & RDES0_FRAME_TYPE),
+                                        !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+       p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
        else
-               ehn_desc_rx_set_on_ring(p, end);
+               ehn_desc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
index 92b8944f26e3c8566d9e68de4820d06231ca10bd..5bb00234d961c6a5a2385c90bc3fdf54ff96e4ca 100644 (file)
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
        /* DMA RX descriptor ring initialization */
        void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-                       int end);
+                       int end, int bfsize);
        /* DMA TX descriptor ring initialization */
        void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
        /* Invoked by the xmit function to prepare the tx descriptor */
index de65bb29feba967cc7a0d6ff3184998f359dde34..b7dd4e3c760d82da1439fb0d8dd9d418c34256a2 100644 (file)
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                return dma_own;
 
        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-               pr_warn("%s: Oversized frame spanned multiple buffers\n",
-                       __func__);
                stats->rx_length_errors++;
                return discard_frame;
        }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-                              int end)
+                              int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+       p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_rx_set_on_chain(p, end);
        else
-               ndesc_rx_set_on_ring(p, end);
+               ndesc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
index 6a2e1031a62ae3c4d16f7f09f4d09481ccfa325d..a26e36dbb5df0deff58ef23a18df287c0ceef330 100644 (file)
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
 }
 
 /**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_channel *ch = &priv->channel[queue];
-       unsigned int entry = rx_q->cur_rx;
+       unsigned int next_entry = rx_q->cur_rx;
        int coe = priv->hw->rx_csum;
-       unsigned int next_entry;
        unsigned int count = 0;
        bool xmac;
 
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
        }
        while (count < limit) {
-               int status;
+               int entry, status;
                struct dma_desc *p;
                struct dma_desc *np;
 
+               entry = next_entry;
+
                if (priv->extend_desc)
                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
                else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         *  ignored
                         */
                        if (frame_len > priv->dma_buf_sz) {
-                               netdev_err(priv->dev,
-                                          "len %d larger than size (%d)\n",
-                                          frame_len, priv->dma_buf_sz);
+                               if (net_ratelimit())
+                                       netdev_err(priv->dev,
+                                                  "len %d larger than size (%d)\n",
+                                                  frame_len, priv->dma_buf_sz);
                                priv->dev->stats.rx_length_errors++;
-                               break;
+                               continue;
                        }
 
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                                dev_warn(priv->device,
                                                         "packet dropped\n");
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
 
                                dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        } else {
                                skb = rx_q->rx_skbuff[entry];
                                if (unlikely(!skb)) {
-                                       netdev_err(priv->dev,
-                                                  "%s: Inconsistent Rx chain\n",
-                                                  priv->dev->name);
+                                       if (net_ratelimit())
+                                               netdev_err(priv->dev,
+                                                          "%s: Inconsistent Rx chain\n",
+                                                          priv->dev->name);
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
                                prefetch(skb->data - NET_IP_ALIGN);
                                rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
                }
-               entry = next_entry;
        }
 
        stmmac_rx_refill(priv, queue);
index e859ae2e42d5a152a567de048e898eeafa99fcb5..49f41b64077bb9877d74ea40bbddffea5fe835d4 100644 (file)
@@ -987,6 +987,7 @@ struct netvsc_device {
 
        wait_queue_head_t wait_drain;
        bool destroy;
+       bool tx_disable; /* if true, do not wake up queue again */
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
index 813d195bbd57fed2ef96ea708b637ca8197be458..e0dce373cdd9d875ded78bff545d76b32920f69c 100644 (file)
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
+       net_device->tx_disable = false;
 
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
        } else {
                struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-               if (netif_tx_queue_stopped(txq) &&
+               if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_get_avail_to_write_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                ndev_ctx->eth_stats.stop_queue++;
-               if (atomic_read(&nvchan->queue_sends) < 1) {
+               if (atomic_read(&nvchan->queue_sends) < 1 &&
+                   !net_device->tx_disable) {
                        netif_tx_wake_queue(txq);
                        ndev_ctx->eth_stats.wake_queue++;
                        ret = -ENOSPC;
index cf4897043e833618b5f5d6d452914ddbeff0e00d..b20fb0fb595bde3d7455e2c6214812fef6732017 100644 (file)
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
        rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+                            struct net_device *ndev)
+{
+       nvscdev->tx_disable = false;
+       virt_wmb(); /* ensure queue wake up mechanism is on */
+
+       netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
        rdev = nvdev->extension;
        if (!rdev->link_state) {
                netif_carrier_on(net);
-               netif_tx_wake_all_queues(net);
+               netvsc_tx_enable(nvdev, net);
        }
 
        if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
        }
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+                             struct net_device *ndev)
+{
+       if (nvscdev) {
+               nvscdev->tx_disable = true;
+               virt_wmb(); /* ensure txq will not wake up after stop */
+       }
+
+       netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
 
-       netif_tx_disable(net);
+       netvsc_tx_disable(nvdev, net);
 
        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
 
        /* If device was up (receiving) then shutdown */
        if (netif_running(ndev)) {
-               netif_tx_disable(ndev);
+               netvsc_tx_disable(nvdev, ndev);
 
                ret = rndis_filter_close(nvdev);
                if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
-                       netif_tx_wake_all_queues(net);
+                       netvsc_tx_enable(net_device, net);
                } else {
                        notify = true;
                }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                }
                kfree(event);
                break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
index 74bebbdb4b158791135410d00591e84eb9da7073..9195f3476b1d7924de0acedab5c90d2f22533e5a 100644 (file)
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
index 7c1430ed02445b6e6f13c663b555ef550276c899..6d1a1abbed27e161f173f63ccb5aba5c6dca18ed 100644 (file)
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
 
        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
+       dev->priv_flags |= IFF_NO_RX_HANDLER;
 
        dev->min_mtu = 0;
        dev->max_mtu = 0;
index 38b741aef0bf5a93513f1ad1f8ab61b9de7c8078..a4cc2a1cccb7516216b23c4fe34431996616e3c3 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    woken;
+       bool                    done;
        bool                    cancelled;
        struct wait_queue_entry wait;
        struct work_struct      work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
        struct kioctx           *ki_ctx;
        kiocb_cancel_fn         *ki_cancel;
 
-       struct iocb __user      *ki_user_iocb;  /* user's aiocb */
-       __u64                   ki_user_data;   /* user's data for completion */
+       struct io_event         ki_res;
 
        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
        if (unlikely(!req))
                return NULL;
 
+       if (unlikely(!get_reqs_available(ctx))) {
+               kfree(req);
+               return NULL;
+       }
+
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
-       refcount_set(&req->ki_refcnt, 0);
+       refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
 }
@@ -1067,30 +1074,20 @@ out:
        return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
-{
-       if (refcount_read(&iocb->ki_refcnt) == 0 ||
-           refcount_dec_and_test(&iocb->ki_refcnt)) {
-               if (iocb->ki_filp)
-                       fput(iocb->ki_filp);
-               percpu_ref_put(&iocb->ki_ctx->reqs);
-               kmem_cache_free(kiocb_cachep, iocb);
-       }
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-                          long res, long res2)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-       ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-       ev->data = iocb->ki_user_data;
-       ev->res = res;
-       ev->res2 = res2;
+       if (iocb->ki_eventfd)
+               eventfd_ctx_put(iocb->ki_eventfd);
+       if (iocb->ki_filp)
+               fput(iocb->ki_filp);
+       percpu_ref_put(&iocb->ki_ctx->reqs);
+       kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *     Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
        ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-       aio_fill_event(event, iocb, res, res2);
+       *event = iocb->ki_res;
 
        kunmap_atomic(ev_page);
        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-       pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-                res, res2);
+       pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+                (void __user *)(unsigned long)iocb->ki_res.obj,
+                iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
        /* after flagging the request as done, we
         * must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         * eventfd. The eventfd_signal() function is safe to be called
         * from IRQ context.
         */
-       if (iocb->ki_eventfd) {
+       if (iocb->ki_eventfd)
                eventfd_signal(iocb->ki_eventfd, 1);
-               eventfd_ctx_put(iocb->ki_eventfd);
-       }
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
-       iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+       if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+               aio_complete(iocb);
+               iocb_destroy(iocb);
+       }
 }
 
 /* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
                file_end_write(kiocb->ki_filp);
        }
 
-       aio_complete(iocb, res, res2);
+       iocb->ki_res.res = res;
+       iocb->ki_res.res2 = res2;
+       iocb_put(iocb);
 }
 
 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
        }
 }
 
-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
                        bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
        return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
                         bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 
 static void aio_fsync_work(struct work_struct *work)
 {
-       struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-       int ret;
+       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-       ret = vfs_fsync(req->file, req->datasync);
-       aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+       iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+       iocb_put(iocb);
 }
 
 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
        return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&iocb->ki_list);
+       iocb->ki_res.res = mangle_poll(mask);
+       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
-       aio_poll_complete(iocb, mask);
+       iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        __poll_t mask = key_to_poll(key);
        unsigned long flags;
 
-       req->woken = true;
-
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               if (!(mask & req->events))
-                       return 0;
+       if (mask && !(mask & req->events))
+               return 0;
+
+       list_del_init(&req->wait.entry);
 
+       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                /*
                 * Try to complete the iocb inline if we can. Use
                 * irqsave/irqrestore because not all filesystems (e.g. fuse)
                 * call this function with IRQs disabled and because IRQs
                 * have to be disabled before ctx_lock is obtained.
                 */
-               if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-                       list_del(&iocb->ki_list);
-                       spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-                       list_del_init(&req->wait.entry);
-                       aio_poll_complete(iocb, mask);
-                       return 1;
-               }
+               list_del(&iocb->ki_list);
+               iocb->ki_res.res = mangle_poll(mask);
+               req->done = true;
+               spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+               iocb_put(iocb);
+       } else {
+               schedule_work(&req->work);
        }
-
-       list_del_init(&req->wait.entry);
-       schedule_work(&req->work);
        return 1;
 }
 
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
        struct kioctx *ctx = aiocb->ki_ctx;
        struct poll_iocb *req = &aiocb->poll;
        struct aio_poll_table apt;
+       bool cancel = false;
        __poll_t mask;
 
        /* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->woken = false;
+       req->done = false;
        req->cancelled = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&aiocb->ki_refcnt, 2);
-
        mask = vfs_poll(req->file, &apt.pt) & req->events;
-       if (unlikely(!req->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
-
        spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       if (req->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(req->head)) {
+               spin_lock(&req->head->lock);
+               if (unlikely(list_empty(&req->wait.entry))) {
+                       if (apt.error)
+                               cancel = true;
+                       apt.error = 0;
+                       mask = 0;
+               }
+               if (mask || apt.error) {
+                       list_del_init(&req->wait.entry);
+               } else if (cancel) {
+                       WRITE_ONCE(req->cancelled, true);
+               } else if (!req->done) { /* actually waiting for an event */
+                       list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+                       aiocb->ki_cancel = aio_poll_cancel;
+               }
+               spin_unlock(&req->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               aiocb->ki_res.res = mangle_poll(mask);
                apt.error = 0;
-       } else if (mask || apt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&req->wait.entry));
-               list_del_init(&req->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-               aiocb->ki_cancel = aio_poll_cancel;
        }
-       spin_unlock(&req->head->lock);
        spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-       if (unlikely(apt.error))
-               return apt.error;
-
        if (mask)
-               aio_poll_complete(aiocb, mask);
-       iocb_put(aiocb);
-       return 0;
+               iocb_put(aiocb);
+       return apt.error;
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-                          struct iocb __user *user_iocb, bool compat)
+                          struct iocb __user *user_iocb, struct aio_kiocb *req,
+                          bool compat)
 {
-       struct aio_kiocb *req;
-       ssize_t ret;
-
-       /* enforce forwards compatibility on users */
-       if (unlikely(iocb->aio_reserved2)) {
-               pr_debug("EINVAL: reserve field set\n");
-               return -EINVAL;
-       }
-
-       /* prevent overflows */
-       if (unlikely(
-           (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-           (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-           ((ssize_t)iocb->aio_nbytes < 0)
-          )) {
-               pr_debug("EINVAL: overflow check\n");
-               return -EINVAL;
-       }
-
-       if (!get_reqs_available(ctx))
-               return -EAGAIN;
-
-       ret = -EAGAIN;
-       req = aio_get_req(ctx);
-       if (unlikely(!req))
-               goto out_put_reqs_available;
-
        req->ki_filp = fget(iocb->aio_fildes);
-       ret = -EBADF;
        if (unlikely(!req->ki_filp))
-               goto out_put_req;
+               return -EBADF;
 
        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+               struct eventfd_ctx *eventfd;
                /*
                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
                 * instance of the file* now. The file descriptor must be
                 * an eventfd() fd, and will be signaled for each completed
                 * event using the eventfd_signal() function.
                 */
-               req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
-               if (IS_ERR(req->ki_eventfd)) {
-                       ret = PTR_ERR(req->ki_eventfd);
-                       req->ki_eventfd = NULL;
-                       goto out_put_req;
-               }
+               eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+               if (IS_ERR(eventfd))
+                       return PTR_ERR(req->ki_eventfd);
+
+               req->ki_eventfd = eventfd;
        }
 
-       ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-       if (unlikely(ret)) {
+       if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
                pr_debug("EFAULT: aio_key\n");
-               goto out_put_req;
+               return -EFAULT;
        }
 
-       req->ki_user_iocb = user_iocb;
-       req->ki_user_data = iocb->aio_data;
+       req->ki_res.obj = (u64)(unsigned long)user_iocb;
+       req->ki_res.data = iocb->aio_data;
+       req->ki_res.res = 0;
+       req->ki_res.res2 = 0;
 
        switch (iocb->aio_lio_opcode) {
        case IOCB_CMD_PREAD:
-               ret = aio_read(&req->rw, iocb, false, compat);
-               break;
+               return aio_read(&req->rw, iocb, false, compat);
        case IOCB_CMD_PWRITE:
-               ret = aio_write(&req->rw, iocb, false, compat);
-               break;
+               return aio_write(&req->rw, iocb, false, compat);
        case IOCB_CMD_PREADV:
-               ret = aio_read(&req->rw, iocb, true, compat);
-               break;
+               return aio_read(&req->rw, iocb, true, compat);
        case IOCB_CMD_PWRITEV:
-               ret = aio_write(&req->rw, iocb, true, compat);
-               break;
+               return aio_write(&req->rw, iocb, true, compat);
        case IOCB_CMD_FSYNC:
-               ret = aio_fsync(&req->fsync, iocb, false);
-               break;
+               return aio_fsync(&req->fsync, iocb, false);
        case IOCB_CMD_FDSYNC:
-               ret = aio_fsync(&req->fsync, iocb, true);
-               break;
+               return aio_fsync(&req->fsync, iocb, true);
        case IOCB_CMD_POLL:
-               ret = aio_poll(req, iocb);
-               break;
+               return aio_poll(req, iocb);
        default:
                pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-               ret = -EINVAL;
-               break;
+               return -EINVAL;
        }
-
-       /*
-        * If ret is 0, we'd either done aio_complete() ourselves or have
-        * arranged for that to be done asynchronously.  Anything non-zero
-        * means that we need to destroy req ourselves.
-        */
-       if (ret)
-               goto out_put_req;
-       return 0;
-out_put_req:
-       if (req->ki_eventfd)
-               eventfd_ctx_put(req->ki_eventfd);
-       iocb_put(req);
-out_put_reqs_available:
-       put_reqs_available(ctx, 1);
-       return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
+       struct aio_kiocb *req;
        struct iocb iocb;
+       int err;
 
        if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
                return -EFAULT;
 
-       return __io_submit_one(ctx, &iocb, user_iocb, compat);
+       /* enforce forwards compatibility on users */
+       if (unlikely(iocb.aio_reserved2)) {
+               pr_debug("EINVAL: reserve field set\n");
+               return -EINVAL;
+       }
+
+       /* prevent overflows */
+       if (unlikely(
+           (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+           (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+           ((ssize_t)iocb.aio_nbytes < 0)
+          )) {
+               pr_debug("EINVAL: overflow check\n");
+               return -EINVAL;
+       }
+
+       req = aio_get_req(ctx);
+       if (unlikely(!req))
+               return -EAGAIN;
+
+       err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+       /* Done with the synchronous reference */
+       iocb_put(req);
+
+       /*
+        * If err is 0, we'd either done aio_complete() ourselves or have
+        * arranged for that to be done asynchronously.  Anything non-zero
+        * means that we need to destroy req ourselves.
+        */
+       if (unlikely(err)) {
+               iocb_destroy(req);
+               put_reqs_available(ctx, 1);
+       }
+       return err;
 }
 
 /* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *     Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-       struct aio_kiocb *kiocb;
-
-       assert_spin_locked(&ctx->ctx_lock);
-
-       /* TODO: use a hash or array, this sucks. */
-       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-               if (kiocb->ki_user_iocb == iocb)
-                       return kiocb;
-       }
-       return NULL;
-}
-
 /* sys_io_cancel:
  *     Attempts to cancel an iocb previously passed to io_submit.  If
  *     the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
        struct aio_kiocb *kiocb;
        int ret = -EINVAL;
        u32 key;
+       u64 obj = (u64)(unsigned long)iocb;
 
        if (unlikely(get_user(key, &iocb->aio_key)))
                return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                return -EINVAL;
 
        spin_lock_irq(&ctx->ctx_lock);
-       kiocb = lookup_kiocb(ctx, iocb);
-       if (kiocb) {
-               ret = kiocb->ki_cancel(&kiocb->rw);
-               list_del_init(&kiocb->ki_list);
+       /* TODO: use a hash or array, this sucks. */
+       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+               if (kiocb->ki_res.obj == obj) {
+                       ret = kiocb->ki_cancel(&kiocb->rw);
+                       list_del_init(&kiocb->ki_list);
+                       break;
+               }
        }
        spin_unlock_irq(&ctx->ctx_lock);
 
index f9b71c12cc9f6d46267eaf73a801dd00715a9cf2..a05bf1d6e1d04143da40126e0a07ea927347001e 100644 (file)
@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                        tcon->ses->server->echo_interval / HZ);
        if (tcon->snapshot_time)
                seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+       if (tcon->handle_timeout)
+               seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
        /* convert actimeo and display it in seconds */
        seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
index 38feae812b4704b315ee3adfe2a1eaa2c4740e45..5b18d45857409eb06624a894e2308e6728c39ba3 100644 (file)
  */
 #define CIFS_MAX_ACTIMEO (1 << 30)
 
+/*
+ * Max persistent and resilient handle timeout (milliseconds).
+ * Windows durable max was 960000 (16 minutes)
+ */
+#define SMB3_MAX_HANDLE_TIMEOUT 960000
+
 /*
  * MAX_REQ is the maximum number of requests that WE will send
  * on one socket concurrently.
@@ -586,6 +592,7 @@ struct smb_vol {
        struct nls_table *local_nls;
        unsigned int echo_interval; /* echo interval in secs */
        __u64 snapshot_time; /* needed for timewarp tokens */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
 };
 
@@ -1058,6 +1065,7 @@ struct cifs_tcon {
        __u32 vol_serial_number;
        __le64 vol_create_time;
        __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        __u32 ss_flags;         /* sector size flags */
        __u32 perf_sector_size; /* best sector size for perf */
        __u32 max_chunks;
index a8e9738db691294736105bf8a0cd03772a9a2447..4c0e44489f21497670131b5a889ae6f4eb0b843e 100644 (file)
@@ -103,7 +103,7 @@ enum {
        Opt_cruid, Opt_gid, Opt_file_mode,
        Opt_dirmode, Opt_port,
        Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
-       Opt_echo_interval, Opt_max_credits,
+       Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
        Opt_snapshot,
 
        /* Mount options which take string value */
@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_rsize, "rsize=%s" },
        { Opt_wsize, "wsize=%s" },
        { Opt_actimeo, "actimeo=%s" },
+       { Opt_handletimeout, "handletimeout=%s" },
        { Opt_echo_interval, "echo_interval=%s" },
        { Opt_max_credits, "max_credits=%s" },
        { Opt_snapshot, "snapshot=%s" },
@@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        vol->actimeo = CIFS_DEF_ACTIMEO;
 
+       /* Most clients set timeout to 0, allows server to use its default */
+       vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
        /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
        vol->ops = &smb30_operations;
        vol->vals = &smbdefault_values;
@@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
                        break;
+               case Opt_handletimeout:
+                       if (get_option_ul(args, &option)) {
+                               cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
+                                        __func__);
+                               goto cifs_parse_mount_err;
+                       }
+                       vol->handle_timeout = option;
+                       if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+                               cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
+                               goto cifs_parse_mount_err;
+                       }
+                       break;
                case Opt_echo_interval:
                        if (get_option_ul(args, &option)) {
                                cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
                return 0;
        if (tcon->snapshot_time != volume_info->snapshot_time)
                return 0;
+       if (tcon->handle_timeout != volume_info->handle_timeout)
+               return 0;
        return 1;
 }
 
@@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
                        tcon->snapshot_time = volume_info->snapshot_time;
        }
 
+       if (volume_info->handle_timeout) {
+               if (ses->server->vals->protocol_id == 0) {
+                       cifs_dbg(VFS,
+                            "Use SMB2.1 or later for handle timeout option\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
+               } else
+                       tcon->handle_timeout = volume_info->handle_timeout;
+       }
+
        tcon->ses = ses;
        if (volume_info->password) {
                tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
index b204e84b87fb52d938dc138379f7877ffb2ba74a..54bffb2a1786d00c5becdb9c2c275c0aa5f87c99 100644 (file)
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
 
 
         if (oparms->tcon->use_resilient) {
-               nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
+               /* default timeout is 0, servers pick default (120 seconds) */
+               nr_ioctl_req.Timeout =
+                       cpu_to_le32(oparms->tcon->handle_timeout);
                nr_ioctl_req.Reserved = 0;
                rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
                        fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
                        true /* is_fsctl */,
                        (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
-                       NULL, NULL /* no return info */);
+                       CIFSMaxBufSize, NULL, NULL /* no return info */);
                if (rc == -EOPNOTSUPP) {
                        cifs_dbg(VFS,
                             "resiliency not supported by server, disabling\n");
index 1022a3771e140d819e767ba5a75677a88d65f911..00225e699d036c079441d53ee896e7abcdda1149 100644 (file)
@@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
-                       (char **)&out_buf, &ret_data_len);
+                       CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(FYI,
                         "server does not support query network interfaces\n");
@@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
 
-       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
-               oplock = smb2_parse_lease_state(server, o_rsp,
-                                               &oparms.fid->epoch,
-                                               oparms.fid->lease_key);
-       else
-               goto oshr_exit;
-
-
        memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
        tcon->crfid.tcon = tcon;
        tcon->crfid.is_valid = true;
        kref_init(&tcon->crfid.refcount);
-       kref_get(&tcon->crfid.refcount);
 
+       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+               kref_get(&tcon->crfid.refcount);
+               oplock = smb2_parse_lease_state(server, o_rsp,
+                                               &oparms.fid->epoch,
+                                               oparms.fid->lease_key);
+       } else
+               goto oshr_exit;
 
        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
                goto oshr_exit;
-       rc = smb2_validate_and_copy_iov(
+       if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
-                               (char *)&tcon->crfid.file_all_info);
-       if (rc)
-               goto oshr_exit;
-       tcon->crfid.file_all_info_is_valid = 1;
+                               (char *)&tcon->crfid.file_all_info))
+               tcon->crfid.file_all_info_is_valid = 1;
 
  oshr_exit:
        mutex_unlock(&tcon->crfid.fid_mutex);
@@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
-                       NULL, 0 /* no input */,
+                       NULL, 0 /* no input */, CIFSMaxBufSize,
                        (char **)&res_key, &ret_data_len);
 
        if (rc) {
@@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                        rc = SMB2_ioctl_init(tcon, &rqst[1],
                                             COMPOUND_FID, COMPOUND_FID,
                                             qi.info_type, true, NULL,
-                                            0);
+                                            0, CIFSMaxBufSize);
                }
        } else if (qi.flags == PASSTHRU_QUERY_INFO) {
                memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
                        true /* is_fsctl */, (char *)pcchunk,
-                       sizeof(struct copychunk_ioctl), (char **)&retbuf,
-                       &ret_data_len);
+                       sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
+                       (char **)&retbuf, &ret_data_len);
                if (rc == 0) {
                        if (ret_data_len !=
                                        sizeof(struct copychunk_ioctl_rsp)) {
@@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
                        true /* is_fctl */,
-                       &setsparse, 1, NULL, NULL);
+                       &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
        if (rc) {
                tcon->broken_sparse_sup = true;
                cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
                        true /* is_fsctl */,
                        (char *)&dup_ext_buf,
                        sizeof(struct duplicate_extents_to_file),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
        if (ret_data_len > 0)
@@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
                        true /* is_fsctl */,
                        (char *)&integr_info,
                        sizeof(struct fsctl_set_integrity_information_req),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
 }
@@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
 #define GMT_TOKEN_SIZE 50
 
+#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
+
 /*
  * Input buffer contains (empty) struct smb_snapshot array with size filled in
  * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
        char *retbuf = NULL;
        unsigned int ret_data_len = 0;
        int rc;
+       u32 max_response_size;
        struct smb_snapshot_array snapshot_in;
 
+       if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
+               return -EFAULT;
+
+       /*
+        * Note that for snapshot queries that servers like Azure expect that
+        * the first query be minimal size (and just used to get the number/size
+        * of previous versions) so response size must be specified as EXACTLY
+        * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
+        * of eight bytes.
+        */
+       if (ret_data_len == 0)
+               max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
+       else
+               max_response_size = CIFSMaxBufSize;
+
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
                        FSCTL_SRV_ENUMERATE_SNAPSHOTS,
                        true /* is_fsctl */,
-                       NULL, 0 /* no input data */,
+                       NULL, 0 /* no input data */, max_response_size,
                        (char **)&retbuf,
                        &ret_data_len);
        cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
@@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                                FSCTL_DFS_GET_REFERRALS,
                                true /* is_fsctl */,
-                               (char *)dfs_req, dfs_req_size,
+                               (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
                                (char **)&dfs_rsp, &dfs_rsp_size);
        } while (rc == -EAGAIN);
 
@@ -2658,7 +2672,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
                             cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                             true /* is_fctl */, (char *)&fsctl_buf,
-                            sizeof(struct file_zero_data_information));
+                            sizeof(struct file_zero_data_information),
+                            CIFSMaxBufSize);
        if (rc)
                goto zero_range_exit;
 
@@ -2735,7 +2750,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                        true /* is_fctl */, (char *)&fsctl_buf,
-                       sizeof(struct file_zero_data_information), NULL, NULL);
+                       sizeof(struct file_zero_data_information),
+                       CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
        return rc;
 }
index 21ac19ff19cb2c3257f524f4aef90f0de2e8d342..21ad01d55ab2d32f0a4406c4cd0704cfab80026a 100644 (file)
@@ -1002,7 +1002,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
-               (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
+               (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+               (char **)&pneg_rsp, &rsplen);
        if (rc == -EOPNOTSUPP) {
                /*
                 * Old Windows versions or Netapp SMB server can return
@@ -1858,8 +1859,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
 }
 
 static struct create_durable_v2 *
-create_durable_v2_buf(struct cifs_fid *pfid)
+create_durable_v2_buf(struct cifs_open_parms *oparms)
 {
+       struct cifs_fid *pfid = oparms->fid;
        struct create_durable_v2 *buf;
 
        buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1873,7 +1875,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
                                (struct create_durable_v2, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
 
-       buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+       /*
+        * NB: Handle timeout defaults to 0, which allows server to choose
+        * (most servers default to 120 seconds) and most clients default to 0.
+        * This can be overridden at mount ("handletimeout=") if the user wants
+        * a different persistent (or resilient) handle timeout for all opens
+        * opens on a particular SMB3 mount.
+        */
+       buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
        buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
        generate_random_uuid(buf->dcontext.CreateGuid);
        memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1926,7 +1935,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = create_durable_v2_buf(oparms->fid);
+       iov[num].iov_base = create_durable_v2_buf(oparms);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_durable_v2);
@@ -2478,7 +2487,8 @@ creat_exit:
 int
 SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                u64 persistent_fid, u64 volatile_fid, u32 opcode,
-               bool is_fsctl, char *in_data, u32 indatalen)
+               bool is_fsctl, char *in_data, u32 indatalen,
+               __u32 max_response_size)
 {
        struct smb2_ioctl_req *req;
        struct kvec *iov = rqst->rq_iov;
@@ -2520,16 +2530,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
        req->OutputCount = 0; /* MBZ */
 
        /*
-        * Could increase MaxOutputResponse, but that would require more
-        * than one credit. Windows typically sets this smaller, but for some
+        * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
+        * We Could increase default MaxOutputResponse, but that could require
+        * more credits. Windows typically sets this smaller, but for some
         * ioctls it may be useful to allow server to send more. No point
         * limiting what the server can send as long as fits in one credit
-        * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
-        * (by default, note that it can be overridden to make max larger)
-        * in responses (except for read responses which can be bigger.
-        * We may want to bump this limit up
+        * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want
+        * to increase this limit up in the future.
+        * Note that for snapshot queries that servers like Azure expect that
+        * the first query be minimal size (and just used to get the number/size
+        * of previous versions) so response size must be specified as EXACTLY
+        * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
+        * of eight bytes.  Currently that is the only case where we set max
+        * response size smaller.
         */
-       req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
+       req->MaxOutputResponse = cpu_to_le32(max_response_size);
 
        if (is_fsctl)
                req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
@@ -2550,13 +2565,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
                cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
+
 /*
  *     SMB2 IOCTL is used for both IOCTLs and FSCTLs
  */
 int
 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid, u32 opcode, bool is_fsctl,
-          char *in_data, u32 indatalen,
+          char *in_data, u32 indatalen, u32 max_out_data_len,
           char **out_data, u32 *plen /* returned data len */)
 {
        struct smb_rqst rqst;
@@ -2593,8 +2609,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rqst.rq_iov = iov;
        rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
 
-       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid,
-                            opcode, is_fsctl, in_data, indatalen);
+       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+                            is_fsctl, in_data, indatalen, max_out_data_len);
        if (rc)
                goto ioctl_exit;
 
@@ -2672,7 +2688,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SET_COMPRESSION, true /* is_fsctl */,
                        (char *)&fsctl_input /* data input */,
-                       2 /* in data len */, &ret_data /* out data */, NULL);
+                       2 /* in data len */, CIFSMaxBufSize /* max out data */,
+                       &ret_data /* out data */, NULL);
 
        cifs_dbg(FYI, "set compression rc %d\n", rc);
 
index 3c32d0cfea69b0c7191336e5b38247de578ed63b..52df125e918984139b176a5f4101ae6da11c7e0f 100644 (file)
@@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
 extern void SMB2_open_free(struct smb_rqst *rqst);
 extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                    bool is_fsctl, char *in_data, u32 indatalen,
+                    bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
                     char **out_data, u32 *plen /* returned data len */);
 extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                           u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                          bool is_fsctl, char *in_data, u32 indatalen);
+                          bool is_fsctl, char *in_data, u32 indatalen,
+                          __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
index 95b5e78c22b1e98811d3aca9c64c2c5deb54c6fe..f25daa207421c50cf38b1e4771ef5ab3332e9be1 100644 (file)
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
 {
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
+       struct inode *inode = container_of(head, struct inode, i_rcu);
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
+       free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, debugfs_i_callback);
 }
 
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
-       .evict_inode    = debugfs_evict_inode,
+       .destroy_inode  = debugfs_destroy_inode,
 };
 
 static void debugfs_release_dentry(struct dentry *dentry)
index 389ea53ea487538061ff3b6da78e27df2f894ac1..bccfc40b3a74ab002e45a07149afbe09634d8f64 100644 (file)
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 
        jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
 
-       if (f->target) {
-               kfree(f->target);
-               f->target = NULL;
-       }
-
        fds = f->dents;
        while(fds) {
                fd = fds;
index bb6ae387469f4d020424bfb13333e24d84e68123..05d892c79339f97276c81337c26929d8f53b7db2 100644 (file)
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
 static void jffs2_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
-       kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+       struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+       kfree(f->target);
+       kmem_cache_free(jffs2_inode_cachep, f);
 }
 
 static void jffs2_destroy_inode(struct inode *inode)
index 8dc2818fdd84990b74e07a8d479de994b8e5be9b..12628184772c04b27c975568101a4b5cddf442f8 100644 (file)
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ubifs_inode *ui = ubifs_inode(inode);
+       kfree(ui->data);
        kmem_cache_free(ubifs_inode_slab, ui);
 }
 
 static void ubifs_destroy_inode(struct inode *inode)
 {
-       struct ubifs_inode *ui = ubifs_inode(inode);
-
-       kfree(ui->data);
        call_rcu(&inode->i_rcu, ubifs_i_callback);
 }
 
index 6fee8b1a4400842a7db69b0a08d6bc3edf436854..5cd824c1c0caa8c9adda4a8e6d640f43605cd4fb 100644 (file)
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
        if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_CAP;
-       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
index 022541dc5dbfd7b12a54601c1d1a59e30eed8a37..0d07296488448b28b608cc7795de638aef185ba6 100644 (file)
@@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags {
 };
 
 struct mlx5_td {
+       /* protects tirs list changes while tirs refresh */
+       struct mutex     list_lock;
        struct list_head tirs_list;
        u32              tdn;
 };
index be3cad9c2e4c37b282e5c2d0e2ef5f05a79b7438..583526aad1d0ac6ac3681cf9cf56d4496a221c3f 100644 (file)
@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
                             unsigned char __user *data, int optlen);
 void ip_options_undo(struct ip_options *opt);
 void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
 
 /*
  *     Functions provided by ip_sockglue.c
index a68ced28d8f47e09c6f6beabc56a22a04ae3f163..12689ddfc24c44fe3297d1eda548811d8061670b 100644 (file)
@@ -59,6 +59,7 @@ struct net {
                                                 */
        spinlock_t              rules_mod_lock;
 
+       u32                     hash_mix;
        atomic64_t              cookie_gen;
 
        struct list_head        list;           /* list of network namespaces */
index 16a842456189f2fc1a3363685b5dd4310a32b2b8..d9b665151f3d9e916f35620141542a5a145e6123 100644 (file)
@@ -2,16 +2,10 @@
 #ifndef __NET_NS_HASH_H__
 #define __NET_NS_HASH_H__
 
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
 
 static inline u32 net_hash_mix(const struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
-#else
-       return 0;
-#endif
+       return net->hash_mix;
 }
 #endif
index 7d1a0483a17ba01b94643fd78acd72095a2e0adb..a2b38b3deeca2096d01d536d9a492047827ff994 100644 (file)
@@ -923,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
        sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+       __u32 qlen = qdisc_qlen_sum(sch);
+
+       return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
+                                            __u32 *backlog)
+{
+       struct gnet_stats_queue qstats = { 0 };
+       __u32 len = qdisc_qlen_sum(sch);
+
+       __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+       *qlen = qstats.qlen;
+       *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_reset(sch);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
        qh->head = NULL;
@@ -1106,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
-       if (old != NULL) {
-               unsigned int qlen = old->q.qlen;
-               unsigned int backlog = old->qstats.backlog;
-
-               qdisc_reset(old);
-               qdisc_tree_reduce_backlog(old, qlen, backlog);
-       }
+       if (old != NULL)
+               qdisc_tree_flush_backlog(old);
        sch_tree_unlock(sch);
 
        return old;
index 8974b3755670e37b0540f3d48ee0b29da951a341..3c18260403dde1df951448c600b6ef9ac61f5635 100644 (file)
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
                                         struct xdp_frame *xdpf)
 {
+       unsigned int hard_start_headroom;
        unsigned int frame_size;
        void *pkt_data_start;
        struct sk_buff *skb;
 
+       /* Part of headroom was reserved to xdpf */
+       hard_start_headroom = sizeof(struct xdp_frame) +  xdpf->headroom;
+
        /* build_skb need to place skb_shared_info after SKB end, and
         * also want to know the memory "truesize".  Thus, need to
         * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * is not at a fixed memory location, with mixed length
         * packets, which is bad for cache-line hotness.
         */
-       frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
+       frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       pkt_data_start = xdpf->data - xdpf->headroom;
+       pkt_data_start = xdpf->data - hard_start_headroom;
        skb = build_skb(pkt_data_start, frame_size);
        if (!skb)
                return NULL;
 
-       skb_reserve(skb, xdpf->headroom);
+       skb_reserve(skb, hard_start_headroom);
        __skb_put(skb, xdpf->len);
        if (xdpf->metasize)
                skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * - RX ring dev queue index    (skb_record_rx_queue)
         */
 
+       /* Allow SKB to reuse area used by xdp_frame */
+       xdp_scrub_frame(xdpf);
+
        return skb;
 }
 
index 2ada5e21dfa62175d6cf9667ed4636e4c4ada659..4a8f390a2b821db8cff26f9716952b36013ab152 100644 (file)
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
 }
 EXPORT_SYMBOL(bpf_prog_get_type_path);
 
-static void bpf_evict_inode(struct inode *inode)
-{
-       enum bpf_type type;
-
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
-
-       if (S_ISLNK(inode->i_mode))
-               kfree(inode->i_link);
-       if (!bpf_inode_type(inode, &type))
-               bpf_any_put(inode->i_private, type);
-}
-
 /*
  * Display the mount options in /proc/mounts.
  */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+       struct inode *inode = container_of(head, struct inode, i_rcu);
+       enum bpf_type type;
+
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_link);
+       if (!bpf_inode_type(inode, &type))
+               bpf_any_put(inode->i_private, type);
+       free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
 static const struct super_operations bpf_super_ops = {
        .statfs         = simple_statfs,
        .drop_inode     = generic_delete_inode,
        .show_options   = bpf_show_options,
-       .evict_inode    = bpf_evict_inode,
+       .destroy_inode  = bpf_destroy_inode,
 };
 
 enum {
index fd502c1f71eb003e5975ec58e33ccc8f8e1c0586..6c5a41f7f33856d79f641c57767c7c093ec2a831 100644 (file)
@@ -1897,8 +1897,9 @@ continue_func:
                }
                frame++;
                if (frame >= MAX_CALL_FRAMES) {
-                       WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
-                       return -EFAULT;
+                       verbose(env, "the call stack of %d frames is too deep !\n",
+                               frame);
+                       return -E2BIG;
                }
                goto process_func;
        }
index b7953934aa994e7993254aa6b04438815ed37f1f..f98448cf2defb5b2f212d005dcfa2a899252ae25 100644 (file)
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
                if (unlikely(sig != kinfo.si_signo))
                        goto err;
 
+               /* Only allow sending arbitrary signals to yourself. */
+               ret = -EPERM;
                if ((task_pid(current) != pid) &&
-                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) {
-                       /* Only allow sending arbitrary signals to yourself. */
-                       ret = -EPERM;
-                       if (kinfo.si_code != SI_USER)
-                               goto err;
-
-                       /* Turn this into a regular kill signal. */
-                       prepare_kill_siginfo(sig, &kinfo);
-               }
+                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+                       goto err;
        } else {
                prepare_kill_siginfo(sig, &kinfo);
        }
index f171a83707ced436bb2bd4508060a6cd45a95905..3319e0872d014628a6e505fc80d9daeb8d8a2b47 100644 (file)
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
                                                        bool check_target)
 {
        struct page *page = pfn_to_online_page(pfn);
+       struct page *block_page;
        struct page *end_page;
        unsigned long block_pfn;
 
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
            get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
                return false;
 
+       /* Ensure the start of the pageblock or zone is online and valid */
+       block_pfn = pageblock_start_pfn(pfn);
+       block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+       if (block_page) {
+               page = block_page;
+               pfn = block_pfn;
+       }
+
+       /* Ensure the end of the pageblock or zone is online and valid */
+       block_pfn += pageblock_nr_pages;
+       block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+       end_page = pfn_to_online_page(block_pfn);
+       if (!end_page)
+               return false;
+
        /*
         * Only clear the hint if a sample indicates there is either a
         * free page or an LRU page in the block. One or other condition
         * is necessary for the block to be a migration source/target.
         */
-       block_pfn = pageblock_start_pfn(pfn);
-       pfn = max(block_pfn, zone->zone_start_pfn);
-       page = pfn_to_page(pfn);
-       if (zone != page_zone(page))
-               return false;
-       pfn = block_pfn + pageblock_nr_pages;
-       pfn = min(pfn, zone_end_pfn(zone));
-       end_page = pfn_to_page(pfn);
-
        do {
                if (pfn_valid_within(pfn)) {
                        if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 static void __reset_isolation_suitable(struct zone *zone)
 {
        unsigned long migrate_pfn = zone->zone_start_pfn;
-       unsigned long free_pfn = zone_end_pfn(zone);
+       unsigned long free_pfn = zone_end_pfn(zone) - 1;
        unsigned long reset_migrate = free_pfn;
        unsigned long reset_free = migrate_pfn;
        bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
                                count_compact_events(COMPACTISOLATED, nr_isolated);
                        } else {
                                /* If isolation fails, abort the search */
-                               order = -1;
+                               order = cc->search_order + 1;
                                page = NULL;
                        }
                }
index 15293c2a5dd821d39233c177d540e4db7407b110..8d77b6ee4477df71bc466c057338ea4ae62dcd88 100644 (file)
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
        return rc;
 }
 
-static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+                                   struct scatterlist *sgl, unsigned int sgc)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = -EINVAL;
+       int rc = 0;
+
+       if (ops->ndo_fcoe_ddp_target)
+               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
 
-       if (ops->ndo_fcoe_get_wwn)
-               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
+#endif
 
-static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
-                                   struct scatterlist *sgl, unsigned int sgc)
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = 0;
-
-       if (ops->ndo_fcoe_ddp_target)
-               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+       int rc = -EINVAL;
 
+       if (ops->ndo_fcoe_get_wwn)
+               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
 #endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
+#ifdef NETDEV_FCOE_WWNN
+       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = vlan_dev_poll_controller,
        .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
index a9b7919c9de55396d35b152a86edbf814287d8dd..d5df0114f08ac5331dcd46699551556ba60a4b80 100644 (file)
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
                ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
-               /* free the TID stats immediately */
-               cfg80211_sinfo_release_content(&sinfo);
+               if (!ret) {
+                       /* free the TID stats immediately */
+                       cfg80211_sinfo_release_content(&sinfo);
+               }
 
                dev_put(real_netdev);
                if (ret == -ENOENT) {
index ef39aabdb69435f384ae2ebef26d4d31367f427c..4fb01108e5f534b8a055edface0717f950ccdacc 100644 (file)
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
 {
        struct batadv_bla_claim search_claim, *claim;
+       struct batadv_bla_claim *claim_removed_entry;
+       struct hlist_node *claim_removed_node;
 
        ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
                   mac, batadv_print_vid(vid));
 
-       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
-                          batadv_choose_claim, claim);
-       batadv_claim_put(claim); /* reference from the hash is gone */
+       claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+                                               batadv_compare_claim,
+                                               batadv_choose_claim, claim);
+       if (!claim_removed_node)
+               goto free_claim;
 
+       /* reference from the hash is gone */
+       claim_removed_entry = hlist_entry(claim_removed_node,
+                                         struct batadv_bla_claim, hash_entry);
+       batadv_claim_put(claim_removed_entry);
+
+free_claim:
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
index 0b4b3fb778a61708978438bcaf7adfd05b997668..208655cf67179b5c2bbcfb8330993b42ca70109f 100644 (file)
@@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
                                                struct attribute *attr,
                                                char *buff, size_t count)
 {
-       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
        struct batadv_hard_iface *hard_iface;
+       struct batadv_priv *bat_priv;
        u32 tp_override;
        u32 old_tp_override;
        bool ret;
@@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 
        atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
 
-       batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       if (hard_iface->soft_iface) {
+               bat_priv = netdev_priv(hard_iface->soft_iface);
+               batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       }
 
 out:
        batadv_hardif_put(hard_iface);
index f73d79139ae79834a3e429fab82b7d55e8a373d5..26c4e2493ddfbfdea26a9b45409821834010a913 100644 (file)
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
                                  struct batadv_tt_global_entry *tt_global,
                                  const char *message)
 {
+       struct batadv_tt_global_entry *tt_removed_entry;
+       struct hlist_node *tt_removed_node;
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Deleting global tt entry %pM (vid: %d): %s\n",
                   tt_global->common.addr,
                   batadv_print_vid(tt_global->common.vid), message);
 
-       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-                          batadv_choose_tt, &tt_global->common);
-       batadv_tt_global_entry_put(tt_global);
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_global->common);
+       if (!tt_removed_node)
+               return;
+
+       /* drop reference of remove hash entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_global_entry,
+                                      common.hash_entry);
+       batadv_tt_global_entry_put(tt_removed_entry);
 }
 
 /**
@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
                           unsigned short vid, const char *message,
                           bool roaming)
 {
+       struct batadv_tt_local_entry *tt_removed_entry;
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       void *tt_entry_exists;
+       struct hlist_node *tt_removed_node;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
 
-       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
                                             batadv_compare_tt,
                                             batadv_choose_tt,
                                             &tt_local_entry->common);
-       if (!tt_entry_exists)
+       if (!tt_removed_node)
                goto out;
 
-       /* extra call to free the local tt entry */
-       batadv_tt_local_entry_put(tt_local_entry);
+       /* drop reference of remove hash entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_local_entry,
+                                      common.hash_entry);
+       batadv_tt_local_entry_put(tt_removed_entry);
 
 out:
        if (tt_local_entry)
index a0e369179f6d1316ec261521b857a7687272d3b9..02da21d771c9cc7d93c442f0310ad2ab5d9b66f6 100644 (file)
@@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        if (ipv4_is_local_multicast(group))
                return 0;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
        br_group.vid = vid;
index b2651bb6d2a31dde065000c59bbbf3dfdadd976e..e657289db4ac44b023ce40ca9959185a6a33cb22 100644 (file)
@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (!skb_queue_empty(&sk->sk_receive_queue));
+       } while (sk->sk_receive_queue.prev != *last);
 
        error = -EAGAIN;
 
index 2b67f2aa59ddb64d27378bed44f9d262093219e0..fdcff29df9158bb850bc57cd04fe4a5971176d07 100644 (file)
@@ -5014,8 +5014,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
        if (pt_prev->list_func != NULL)
                pt_prev->list_func(head, pt_prev, orig_dev);
        else
-               list_for_each_entry_safe(skb, next, head, list)
+               list_for_each_entry_safe(skb, next, head, list) {
+                       skb_list_del_init(skb);
                        pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+               }
 }
 
 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
index b1eb324197321dd390596edd2aec6c343ee14654..36ed619faf3641ae4d40edc7385b003de7500764 100644 (file)
@@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        WARN_ON_ONCE(!ret);
 
        gstrings.len = ret;
-       data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
-       if (gstrings.len && !data)
-               return -ENOMEM;
 
-       __ethtool_get_strings(dev, gstrings.string_set, data);
+       if (gstrings.len) {
+               data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+               if (!data)
+                       return -ENOMEM;
+
+               __ethtool_get_strings(dev, gstrings.string_set, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       ops->get_ethtool_stats(dev, &stats, data);
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+               ops->get_ethtool_stats(dev, &stats, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       if (dev->phydev && !ops->get_ethtool_phy_stats) {
-               ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
-               if (ret < 0)
-                       return ret;
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+
+               if (dev->phydev && !ops->get_ethtool_phy_stats) {
+                       ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+                       if (ret < 0)
+                               goto out;
+               } else {
+                       ops->get_ethtool_phy_stats(dev, &stats, data);
+               }
        } else {
-               ops->get_ethtool_phy_stats(dev, &stats, data);
+               data = NULL;
        }
 
        ret = -EFAULT;
index 647c63a7b25b6745e75a812b65a4052f3c72b690..fc92ebc4e200c5e7857734c749510b0542cf7290 100644 (file)
@@ -6613,14 +6613,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
                                           const struct bpf_prog *prog,
                                           struct bpf_insn_access_aux *info)
 {
-       if (type == BPF_WRITE) {
-               switch (off) {
-               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
-                       break;
-               default:
-                       return false;
-               }
-       }
+       if (type == BPF_WRITE)
+               return false;
 
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, data):
@@ -6632,11 +6626,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
-       case bpf_ctx_range(struct __sk_buff, tc_classid):
-       case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range_till(struct __sk_buff, family, local_port):
-       case bpf_ctx_range(struct __sk_buff, tstamp):
-       case bpf_ctx_range(struct __sk_buff, wire_len):
+       default:
                return false;
        }
 
index bb1a54747d64811a5545a78243f8cf021a4adf46..94a450b2191a9e25ca79534f9caa68a442daa80c 100644 (file)
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Pass parameters to the BPF program */
        memset(flow_keys, 0, sizeof(*flow_keys));
        cb->qdisc_cb.flow_keys = flow_keys;
+       flow_keys->n_proto = skb->protocol;
        flow_keys->nhoff = skb_network_offset(skb);
        flow_keys->thoff = flow_keys->nhoff;
 
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Restore state */
        memcpy(cb, &cb_saved, sizeof(cb_saved));
 
-       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
+                                  skb_network_offset(skb), skb->len);
        flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
                                   flow_keys->nhoff, skb->len);
 
index 17f36317363d19dcdeb6a6e75a116220b078c2b0..7e6dcc6257011d8b60e132e97a0db229c39d1daf 100644 (file)
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 
        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
+       get_random_bytes(&net->hash_mix, sizeof(u32));
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
index 2415d9cb9b89fefb30a7932a70c3497aeb67c80e..ef2cd5712098965d3729f3ba748a8727ee300760 100644 (file)
@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
        unsigned int delta_truesize;
        struct sk_buff *lp;
 
-       if (unlikely(p->len + len >= 65536))
+       if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;
 
        lp = NAPI_GRO_CB(p)->last;
index f227f002c73d382fecd98c8857ce4c9139cb7a8a..db87d9f5801983913e66549e5d5911ead10f3ac1 100644 (file)
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
        if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
                return -ENOMEM;
 
-       return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+       if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+               kfree(fval.sp.vec);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 /**
index ed4f6dc26365baa3e9988b2f11ac26d8ffeb55b7..85c22ada47449d580ee2a175c0729f4bad2cad61 100644 (file)
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        return skb;
 }
 
+static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+                                int *offset)
+{
+       *offset = QCA_HDR_LEN;
+       *proto = ((__be16 *)skb->data)[0];
+
+       return 0;
+}
+
 const struct dsa_device_ops qca_netdev_ops = {
        .xmit   = qca_tag_xmit,
        .rcv    = qca_tag_rcv,
+       .flow_dissect = qca_tag_flow_dissect,
        .overhead = QCA_HDR_LEN,
 };
index ecce2dc78f17eb48d91f8f6638ef0a4e8076fedf..1132d6d1796a4f7c947da76b9b39e7fbe11d3399 100644 (file)
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
                       ip_local_deliver_finish);
 }
 
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt;
        const struct iphdr *iph;
-       struct net_device *dev = skb->dev;
 
        /* It looks as overkill, because not all
           IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
                        }
                }
 
-               if (ip_options_rcv_srr(skb))
+               if (ip_options_rcv_srr(skb, dev))
                        goto drop;
        }
 
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
        }
 #endif
 
-       if (iph->ihl > 5 && ip_rcv_options(skb))
+       if (iph->ihl > 5 && ip_rcv_options(skb, dev))
                goto drop;
 
        rt = skb_rtable(skb);
index 32a35043c9f590314b7fa354d5e948b59e665214..3db31bb9df50622f8c9ae961f4eabc566d1cb74a 100644 (file)
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
        }
 }
 
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
        int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 
                orefdst = skb->_skb_refdst;
                skb_dst_set(skb, NULL);
-               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
                rt2 = skb_rtable(skb);
                if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
                        skb_dst_drop(skb);
index cd4814f7e96223447195f0d0ac224c54d5501d2e..359da68d7c0628360d5f1b727c878f678e28d39a 100644 (file)
@@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-                "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
@@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
        }
 }
 
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
 {
-       if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-               struct dctcp *ca = inet_csk_ca(sk);
+       struct dctcp *ca = inet_csk_ca(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
 
-               /* If this extension is enabled, we clamp dctcp_alpha to
-                * max on packet loss; the motivation is that dctcp_alpha
-                * is an indicator to the extend of congestion and packet
-                * loss is an indicator of extreme congestion; setting
-                * this in practice turned out to be beneficial, and
-                * effectively assumes total congestion which reduces the
-                * window by half.
-                */
-               ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-       }
+       ca->loss_cwnd = tp->snd_cwnd;
+       tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+       if (new_state == TCP_CA_Recovery &&
+           new_state != inet_csk(sk)->icsk_ca_state)
+               dctcp_react_to_loss(sk);
+       /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+        * one loss-adjustment per RTT.
+        */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
                break;
+       case CA_EVENT_LOSS:
+               dctcp_react_to_loss(sk);
+               break;
        default:
                /* Don't care for the rest. */
                break;
index 277d71239d755d858be70663320d8de2ab23dfcc..2f8039a26b08fa2b13b5e4da642c0f4ff8207571 100644 (file)
@@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
 {
        int cpu;
 
-       module_put(net->ipv4.tcp_congestion_control->owner);
+       if (net->ipv4.tcp_congestion_control)
+               module_put(net->ipv4.tcp_congestion_control->owner);
 
        for_each_possible_cpu(cpu)
                inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
index 79d2e43c05c5e792b1498c4bf5f73756252e2c7d..5fc1f4e0c0cf0d3dd403c2dcaf291ea9c096d235 100644 (file)
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
 
 done:
        rhashtable_walk_stop(&iter);
+       rhashtable_walk_exit(&iter);
        return ret;
 }
 
index edbd12067170bc77332d57a04c96812d9702520b..e51f3c648b094afe1d60a518db36a42444c4c55d 100644 (file)
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
-       unsigned int mtu, hlen, left, len;
+       unsigned int mtu, hlen, left, len, nexthdr_offset;
        int hroom, troom;
        __be32 frag_id;
        int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                goto fail;
        hlen = err;
        nexthdr = *prevhdr;
+       nexthdr_offset = prevhdr - skb_network_header(skb);
 
        mtu = ip6_skb_dst_mtu(skb);
 
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
            (err = skb_checksum_help(skb)))
                goto fail;
 
+       prevhdr = skb_network_header(skb) + nexthdr_offset;
        hroom = LL_RESERVED_SPACE(rt->dst.dev);
        if (skb_has_frag_list(skb)) {
                unsigned int first_len = skb_pagelen(skb);
index 0c6403cf8b5226fbe4bf2e4506b3816b30973b0b..ade1390c63488a60b405ca70052b3493fecc67d5 100644 (file)
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
                                           eiph->daddr, eiph->saddr, 0, 0,
                                           IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
-               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
                        if (!IS_ERR(rt))
                                ip_rt_put(rt);
                        goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        } else {
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
                                   skb2->dev) ||
-                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
                        goto out;
        }
 
index 07e21a82ce4cc2e41af8e38961f9917d357fd20b..b2109b74857d053f52b06c42698cc393d5838609 100644 (file)
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
                    !net_eq(tunnel->net, dev_net(tunnel->dev))))
                        goto out;
 
+               /* skb can be uncloned in iptunnel_pull_header, so
+                * old iph is no longer valid
+                */
+               iph = (const struct iphdr *)skb_mac_header(skb);
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
index c5c5ab6c5a1ccdf55eb7891e8c21ea3cdf7d2a28..44fdc641710dbdb64cedb3306ab822573cb97477 100644 (file)
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
        if (err)
                goto fail;
 
-       err = sock_register(&kcm_family_ops);
-       if (err)
-               goto sock_register_fail;
-
        err = register_pernet_device(&kcm_net_ops);
        if (err)
                goto net_ops_fail;
 
+       err = sock_register(&kcm_family_ops);
+       if (err)
+               goto sock_register_fail;
+
        err = kcm_proc_init();
        if (err)
                goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
        return 0;
 
 proc_init_fail:
-       unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
        sock_unregister(PF_KCM);
 
 sock_register_fail:
+       unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
        proto_unregister(&kcm_proto);
 
 fail:
@@ -2090,8 +2090,8 @@ fail:
 static void __exit kcm_exit(void)
 {
        kcm_proc_exit();
-       unregister_pernet_device(&kcm_net_ops);
        sock_unregister(PF_KCM);
+       unregister_pernet_device(&kcm_net_ops);
        proto_unregister(&kcm_proto);
        destroy_workqueue(kcm_wq);
 
index 691da853bef5cb801d963cae4e9bf7b23a3dddd6..4bdf5e3ac2087a67e715ebd720159205697668a8 100644 (file)
@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 
        struct sw_flow_actions *acts;
        int new_acts_size;
-       int req_size = NLA_ALIGN(attr_len);
+       size_t req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;
 
        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;
 
-       new_acts_size = ksize(*sfa) * 2;
+       new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
index fd2694174607405ab96f6f0dea10bc8dcc8caea9..faf726e00e27c75b11721dbc55518ca60bdf00a6 100644 (file)
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-               if (net != c_net || !tc->t_sock)
+               if (net != c_net)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
index 4060b0955c97db68872a88d6bd05d5143fdc2e7c..0f82d50ea23245be1ce34fcce1cdb4a048c1af17 100644 (file)
@@ -45,8 +45,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
        struct tcf_chain *goto_ch = NULL;
+       u32 psample_group_num, rate;
        struct tc_sample *parm;
-       u32 psample_group_num;
        struct tcf_sample *s;
        bool exists = false;
        int ret, err;
@@ -85,6 +85,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto release_idr;
 
+       rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       if (!rate) {
+               NL_SET_ERR_MSG(extack, "invalid sample rate");
+               err = -EINVAL;
+               goto put_chain;
+       }
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
@@ -96,7 +102,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        spin_lock_bh(&s->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
-       s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       s->rate = rate;
        s->psample_group_num = psample_group_num;
        RCU_INIT_POINTER(s->psample_group, psample_group);
 
index 459921bd3d87b5a563d3725015f8765b20566aa0..a13bc351a4148f40f434b25a9adffc4cf9137548 100644 (file)
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 static void *mall_get(struct tcf_proto *tp, u32 handle)
 {
+       struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+       if (head && head->handle == handle)
+               return head;
+
        return NULL;
 }
 
index acc9b9da985f81ffd9b485e082cf1781e6731ba2..259d97bc2abd39df8df646c2ebc34ea272e1fd70 100644 (file)
@@ -1517,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+       int wlen = skb_network_offset(skb);
        u8 dscp;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
+               wlen += sizeof(struct iphdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
                return dscp;
 
        case htons(ETH_P_IPV6):
+               wlen += sizeof(struct ipv6hdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
index 4dc05409e3fb2742c1af9467aae5d1bf221b7101..114b9048ea7e3682106c6e65644d4d0992e20461 100644 (file)
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
+       __u32 qlen;
 
        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;
+       qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
-       unsigned int qlen, backlog;
 
        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;
 
        sch_tree_lock(sch);
 
-       qlen = cl->q->q.qlen;
-       backlog = cl->q->qstats.backlog;
-       qdisc_reset(cl->q);
-       qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+       qdisc_purge_queue(cl->q);
 
        if (cl->next_alive)
                cbq_deactivate_class(cl);
index 09b8009910657ace91e838eccfa520c81d800750..430df9a55ec4e9742786fb869ab3acf28e84f5ed 100644 (file)
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       drr_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
 {
        struct drr_class *cl = (struct drr_class *)arg;
-       __u32 qlen = cl->qdisc->q.qlen;
+       __u32 qlen = qdisc_qlen_sum(cl->qdisc);
+       struct Qdisc *cl_q = cl->qdisc;
        struct tc_drr_stats xstats;
 
        memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+           gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 24cc220a3218aee4f6c44ed271050c0b8d137ec9..d2ab463f22ae8b122ae43d1969cf795fb11c05b3 100644 (file)
@@ -844,16 +844,6 @@ qdisc_peek_len(struct Qdisc *sch)
        return len;
 }
 
-static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
-               hfsc_purge_queue(sch, parent);
+               qdisc_purge_queue(parent->qdisc);
        hfsc_adjust_levels(parent);
        sch_tree_unlock(sch);
 
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);
 
-       hfsc_purge_queue(sch, cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
        sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;
+       __u32 qlen;
 
-       cl->qstats.backlog = cl->qdisc->qstats.backlog;
+       qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 30f9da7e1076368f2b0718d2bb0e1e3c5432998c..2f9883b196e8e6b10abd9b623b6285274a003ff6 100644 (file)
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
        };
        __u32 qlen = 0;
 
-       if (!cl->level && cl->leaf.q) {
-               qlen = cl->leaf.q->q.qlen;
-               qs.backlog = cl->leaf.q->qstats.backlog;
-       }
+       if (!cl->level && cl->leaf.q)
+               qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
        cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
                                    INT_MIN, INT_MAX);
        cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       if (!cl->level) {
-               unsigned int qlen = cl->leaf.q->q.qlen;
-               unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-               qdisc_reset(cl->leaf.q);
-               qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-       }
+       if (!cl->level)
+               qdisc_purge_queue(cl->leaf.q);
 
        /* delete from hash and active; remainder in destroy_class */
        qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                                          classid, NULL);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
-                       unsigned int qlen = parent->leaf.q->q.qlen;
-                       unsigned int backlog = parent->leaf.q->qstats.backlog;
-
                        /* turn parent into inner node */
-                       qdisc_reset(parent->leaf.q);
-                       qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+                       qdisc_purge_queue(parent->leaf.q);
                        qdisc_put(parent->leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
index 203659bc3906419f6a00edca96561efb503d608d..3a3312467692c4f17bc03a78299322ac9a67250c 100644 (file)
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
index d364e63c396d78fe8866b9a8d7aa1ec9b281814e..ea0dc112b38dd4ac43d0fdc15f4742583c380991 100644 (file)
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                          d, NULL, &sch->bstats) < 0 ||
-                   gnet_stats_copy_queue(d, NULL,
-                                         &sch->qstats, sch->q.qlen) < 0)
+                   qdisc_qstats_copy(d, sch) < 0)
                        return -1;
        }
        return 0;
index 7410ce4d03213d315696ec933722edbc6c542f2a..35b03ae08e0f1f8afbd10f6f4c1d6078e22a48d1 100644 (file)
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
        for (i = q->bands; i < q->max_bands; i++) {
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];
+
                        q->queues[i] = &noop_qdisc;
-                       qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                                 child->qstats.backlog);
+                       qdisc_tree_flush_backlog(child);
                        qdisc_put(child);
                }
        }
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
                                        qdisc_hash_add(child, true);
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_reduce_backlog(old,
-                                                                 old->q.qlen,
-                                                                 old->qstats.backlog);
+                                       qdisc_tree_flush_backlog(old);
                                        qdisc_put(old);
                                }
                                sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
index 847141cd900f1933f0b48684085c747f06c092c1..d519b21535b36b1f163460593573cb018cd1a904 100644 (file)
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i = q->bands; i < oldbands; i++) {
-               struct Qdisc *child = q->queues[i];
-
-               qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                         child->qstats.backlog);
-       }
+       for (i = q->bands; i < oldbands; i++)
+               qdisc_tree_flush_backlog(q->queues[i]);
 
        for (i = oldbands; i < q->bands; i++) {
                q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
index 29f5c4a2468829457ddf734aa1e7711ebfe4bcc8..1589364b54da11dc241212dee190dad741d9d9bc 100644 (file)
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
        [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
        [TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       qfq_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL,
-                                 &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl->qdisc) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 9df9942340eaaa30ed38fc3345649f287a373bee..4e8c0abf619459f396b91fc587271b7938e48330 100644 (file)
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                old_child = q->qdisc;
                q->qdisc = child;
        }
index bab506b01a32950d2ac07ff04815424705c62503..2419fdb759667a5c124f2018a310aabe9318b257 100644 (file)
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
                qdisc_hash_add(child, true);
        sch_tree_lock(sch);
 
-       qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                 q->qdisc->qstats.backlog);
+       qdisc_tree_flush_backlog(q->qdisc);
        qdisc_put(q->qdisc);
        q->qdisc = child;
 
index 206e4dbed12f0e08a2c8782fedac1247995001bb..c7041999eb5d348e7520dab451205fda7a72d22b 100644 (file)
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
index 7f272a9070c5753e61dd140eca77afe4d17d6692..f71578dbb9e39292329e06d98535c764043acd55 100644 (file)
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 
        sch_tree_lock(sch);
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                qdisc_put(q->qdisc);
                q->qdisc = child;
        }
index 6abc8b274270730e482730bbc3ef735a7ffd2e52..951afdeea5e92c7cab48f53482d307c3f9893d89 100644 (file)
@@ -600,6 +600,7 @@ out:
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
        /* No address mapping for V4 sockets */
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
        return sizeof(struct sockaddr_in);
 }
 
index 4ad3586da8f028c0fb8244382b343a1c7635f6cb..340a6e7c43a7d39c596de4f1b0045e4200edc2a3 100644 (file)
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        if (msg->rep_type)
                tipc_tlv_init(msg->rep, msg->rep_type);
 
-       if (cmd->header)
-               (*cmd->header)(msg);
+       if (cmd->header) {
+               err = (*cmd->header)(msg);
+               if (err) {
+                       kfree_skb(msg->rep);
+                       msg->rep = NULL;
+                       return err;
+               }
+       }
 
        arg = nlmsg_new(0, GFP_KERNEL);
        if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
        if (!bearer)
                return -EMSGSIZE;
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_bearer_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_BEARER_NAME);
        if (!string_is_valid(b->name, len))
                return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
        lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_link_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_LINK_NAME);
        if (!string_is_valid(lc->name, len))
                return -EINVAL;
 
index 425351ac2a9b156aacf9234e566d7c5ba0dc5867..20b1912279694a457fb4bc9d5287186ced1afacc 100644 (file)
@@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 
                                return err;
                        }
+               } else {
+                       *zc = false;
                }
 
                rxm->full_len -= padding_length(ctx, tls_ctx, skb);
index 5bf8e52c41fcaf2bb38127d4bb076a9164539ddb..8e7c56e9590fbb0dfa2d76780696206b166ec440 100644 (file)
@@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
 $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
        $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
-                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
        @ln -sf $(@F) $(OUTPUT)libbpf.so
        @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
@@ -220,8 +220,9 @@ install_lib: all_cmd
 install_headers:
        $(call QUIET_INSTALL, headers) \
                $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
-               $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
-               $(call do_install,btf.h,$(prefix)/include/bpf,644);
+               $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,xsk.h,$(prefix)/include/bpf,644);
 
 install: install_lib
 
index 87e3020ac1bc8b3772d98ce58751fc2d6f979184..cf119c9b6f2700e790ec02e16b524f6b3a4cf581 100644 (file)
@@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return fwd_kind == real_kind;
        }
 
+       if (cand_kind != canon_kind)
+               return 0;
+
        switch (cand_kind) {
        case BTF_KIND_INT:
                return btf_equal_int(cand_type, canon_type);
index c3fad065c89c085b39da83de4a751041a99ae3d6..c7727be9719f4ea9b9524ddf1ff92daeebab8982 100644 (file)
@@ -44,6 +44,7 @@
 #include <cpuid.h>
 #include <linux/capability.h>
 #include <errno.h>
+#include <math.h>
 
 char *proc_stat = "/proc/stat";
 FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
 unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
 
 #define RAPL_CORES_ENERGY_STATUS       (1 << 9)
                                        /* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY   (1 << 10)
+                                       /* Indicates cores energy collection is per-core,
+                                        * not per-package. */
+#define RAPL_AMD_F17H          (1 << 11)
+                                       /* 0xc0010299 MSR_RAPL_PWR_UNIT */
+                                       /* 0xc001029a MSR_CORE_ENERGY_STAT */
+                                       /* 0xc001029b MSR_PKG_ENERGY_STAT */
 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
 #define        TJMAX_DEFAULT   100
 
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT      0xc0010299
+#define MSR_CORE_ENERGY_STAT   0xc001029a
+#define MSR_PKG_ENERGY_STAT    0xc001029b
+
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
 /*
@@ -187,6 +199,7 @@ struct core_data {
        unsigned long long c7;
        unsigned long long mc6_us;      /* duplicate as per-core for now, even though per module */
        unsigned int core_temp_c;
+       unsigned int core_energy;       /* MSR_CORE_ENERGY_STAT */
        unsigned int core_id;
        unsigned long long counter[MAX_ADDED_COUNTERS];
 } *core_even, *core_odd;
@@ -273,6 +286,7 @@ struct system_summary {
 
 struct cpu_topology {
        int physical_package_id;
+       int die_id;
        int logical_cpu_id;
        int physical_node_id;
        int logical_node_id;    /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
 
 struct topo_params {
        int num_packages;
+       int num_die;
        int num_cpus;
        int num_cores;
        int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
        int retval, pkg_no, core_no, thread_no, node_no;
 
        for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
-               for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
-                       for (node_no = 0; node_no < topo.nodes_per_pkg;
-                            node_no++) {
+               for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+                       for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
                                for (thread_no = 0; thread_no <
                                        topo.threads_per_core; ++thread_no) {
                                        struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
        { 0x0, "CPU" },
        { 0x0, "APIC" },
        { 0x0, "X2APIC" },
+       { 0x0, "Die" },
 };
 
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
 #define        BIC_CPU         (1ULL << 47)
 #define        BIC_APIC        (1ULL << 48)
 #define        BIC_X2APIC      (1ULL << 49)
+#define        BIC_Die         (1ULL << 50)
 
 #define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
@@ -621,6 +637,8 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Package))
                outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_Die))
+               outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Node))
                outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
 
        if (DO_BIC(BIC_CPU_c1))
                outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
        if (DO_BIC(BIC_CoreTmp))
                outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
 
+       if (do_rapl && !rapl_joules) {
+               if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+       } else if (do_rapl && rapl_joules) {
+               if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+       }
+
        for (mp = sys.cp; mp; mp = mp->next) {
                if (mp->format == FORMAT_RAW) {
                        if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
        if (do_rapl && !rapl_joules) {
                if (DO_BIC(BIC_PkgWatt))
                        outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_CorWatt))
+               if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFXWatt))
                        outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
        } else if (do_rapl && rapl_joules) {
                if (DO_BIC(BIC_Pkg_J))
                        outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_Cor_J))
+               if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFX_J))
                        outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "c6: %016llX\n", c->c6);
                outp += sprintf(outp, "c7: %016llX\n", c->c7);
                outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+               outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
 
                for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                        outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (t == &average.threads) {
                if (DO_BIC(BIC_Package))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_Die))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Node))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        else
                                outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                }
+               if (DO_BIC(BIC_Die)) {
+                       if (c)
+                               outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+                       else
+                               outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               }
                if (DO_BIC(BIC_Node)) {
                        if (t)
                                outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
        }
 
+       /*
+        * If measurement interval exceeds minimum RAPL Joule Counter range,
+        * indicate that results are suspect by printing "**" in fraction place.
+        */
+       if (interval_float < rapl_joule_counter_range)
+               fmt8 = "%s%.2f";
+       else
+               fmt8 = "%6.0f**";
+
+       if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+       if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
        /* print per-package data only for 1st core in package */
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                goto done;
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (DO_BIC(BIC_SYS_LPI))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
 
-       /*
-        * If measurement interval exceeds minimum RAPL Joule Counter range,
-        * indicate that results are suspect by printing "**" in fraction place.
-        */
-       if (interval_float < rapl_joule_counter_range)
-               fmt8 = "%s%.2f";
-       else
-               fmt8 = "%6.0f**";
-
        if (DO_BIC(BIC_PkgWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
-       if (DO_BIC(BIC_CorWatt))
+       if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
        if (DO_BIC(BIC_GFXWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
        if (DO_BIC(BIC_Pkg_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
-       if (DO_BIC(BIC_Cor_J))
+       if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
        if (DO_BIC(BIC_GFX_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
        old->core_temp_c = new->core_temp_c;
        old->mc6_us = new->mc6_us - old->mc6_us;
 
+       DELTA_WRAP32(new->core_energy, old->core_energy);
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        old->counter[i] = new->counter[i];
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        c->c7 = 0;
        c->mc6_us = 0;
        c->core_temp_c = 0;
+       c->core_energy = 0;
 
        p->pkg_wtd_core_c0 = 0;
        p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 
        average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
 
+       average.cores.core_energy += c->core_energy;
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        continue;
@@ -1818,7 +1863,7 @@ retry:
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+       if (DO_BIC(BIC_CPU_c3)) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
@@ -1845,6 +1890,12 @@ retry:
                c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
        }
 
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+                       return -14;
+               c->core_energy = msr & 0xFFFFFFFF;
+       }
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (get_mp(cpu, mp, &c->counter[i]))
                        return -10;
@@ -1934,6 +1985,11 @@ retry:
                        return -16;
                p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
        }
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+                       return -13;
+               p->energy_pkg = msr & 0xFFFFFFFF;
+       }
        if (DO_BIC(BIC_PkgTmp)) {
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
                        return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
 
 /*
  * Parse a file containing a single int.
+ * Return 0 if file can not be opened
+ * Exit if file can be opened, but can not be parsed
  */
 int parse_int_file(const char *fmt, ...)
 {
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
        va_start(args, fmt);
        vsnprintf(path, sizeof(path), fmt, args);
        va_end(args);
-       filep = fopen_or_die(path, "r");
+       filep = fopen(path, "r");
+       if (!filep)
+               return 0;
        if (fscanf(filep, "%d", &value) != 1)
                err(1, "%s: failed to parse number from file", path);
        fclose(filep);
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
 }
 
+int get_die_id(int cpu)
+{
+       return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
 int get_core_id(int cpu)
 {
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
        filep = fopen_or_die(path, "r");
        do {
                offset -= BITMASK_SIZE;
-               fscanf(filep, "%lx%c", &map, &character);
+               if (fscanf(filep, "%lx%c", &map, &character) != 2)
+                       err(1, "%s: failed to parse file", path);
                for (shift = 0; shift < BITMASK_SIZE; shift++) {
                        if ((map >> shift) & 0x1) {
                                so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
-       if (retval != 1)
-               err(1, "CPU LPI");
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+               BIC_NOT_PRESENT(BIC_CPU_LPI);
+               return -1;
+       }
 
        fclose(fp);
 
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
-       if (retval != 1)
-               err(1, "SYS LPI");
-
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle System output\n");
+               BIC_NOT_PRESENT(BIC_SYS_LPI);
+               return -1;
+       }
        fclose(fp);
 
        return 0;
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
                        sp = strchrnul(name_buf, '\n');
                *sp = '\0';
-
                fclose(input);
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(desc, sizeof(desc), input);
+               if (!fgets(desc, sizeof(desc), input))
+                       err(1, "%s: failed to read file", path);
 
                fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
                fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(driver_buf, sizeof(driver_buf), input);
+       if (!fgets(driver_buf, sizeof(driver_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(governor_buf, sizeof(governor_buf), input);
+       if (!fgets(governor_buf, sizeof(governor_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq boost: %d\n", turbo);
                fclose(input);
        }
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
                fclose(input);
        }
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
 #define        RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
 #define        RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
 
-double get_tdp(unsigned int model)
+double get_tdp_intel(unsigned int model)
 {
        unsigned long long msr;
 
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
        }
 }
 
+double get_tdp_amd(unsigned int family)
+{
+       switch (family) {
+       case 0x17:
+       default:
+               /* This is the max stock TDP of HEDT/Server Fam17h chips */
+               return 250.0;
+       }
+}
+
 /*
  * rapl_dram_energy_units_probe()
  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        }
 }
 
-
-/*
- * rapl_probe()
- *
- * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
- */
-void rapl_probe(unsigned int family, unsigned int model)
+void rapl_probe_intel(unsigned int family, unsigned int model)
 {
        unsigned long long msr;
        unsigned int time_unit;
        double tdp;
 
-       if (!genuine_intel)
-               return;
-
        if (family != 6)
                return;
 
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
 
        rapl_time_units = 1.0 / (1 << (time_unit));
 
-       tdp = get_tdp(model);
+       tdp = get_tdp_intel(model);
 
        rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
        if (!quiet)
                fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
 
-       return;
+void rapl_probe_amd(unsigned int family, unsigned int model)
+{
+       unsigned long long msr;
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int has_rapl = 0;
+       double tdp;
+
+       if (max_extended_level >= 0x80000007) {
+               __cpuid(0x80000007, eax, ebx, ecx, edx);
+               /* RAPL (Fam 17h) */
+               has_rapl = edx & (1 << 14);
+       }
+
+       if (!has_rapl)
+               return;
+
+       switch (family) {
+       case 0x17: /* Zen, Zen+ */
+               do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+               }
+               break;
+       default:
+               return;
+       }
+
+       if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
+               return;
+
+       rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
+       rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+       rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+       tdp = get_tdp_amd(model);
+
+       rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+       if (!quiet)
+               fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
+
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+       if (genuine_intel)
+               rapl_probe_intel(family, model);
+       if (authentic_amd)
+               rapl_probe_amd(family, model);
 }
 
 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
+       const char *msr_name;
        int cpu;
 
        if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
-       if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
-               return -1;
+       if (do_rapl & RAPL_AMD_F17H) {
+               msr_name = "MSR_RAPL_PWR_UNIT";
+               if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
+                       return -1;
+       } else {
+               msr_name = "MSR_RAPL_POWER_UNIT";
+               if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+                       return -1;
+       }
 
-       fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+       fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
                rapl_power_units, rapl_energy_units, rapl_time_units);
 
        if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                return INTEL_FAM6_SKYLAKE_MOBILE;
+
+       case INTEL_FAM6_ICELAKE_MOBILE:
+               return INTEL_FAM6_CANNONLAKE_MOBILE;
        }
        return model;
 }
@@ -4702,7 +4846,9 @@ void process_cpuid()
        }
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
-       do_cnl_cstates = is_cnl(family, model);
+
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+               BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
                decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
        int i;
        int max_core_id = 0;
        int max_package_id = 0;
+       int max_die_id = 0;
        int max_siblings = 0;
 
        /* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
                if (cpus[i].physical_package_id > max_package_id)
                        max_package_id = cpus[i].physical_package_id;
 
+               /* get die information */
+               cpus[i].die_id = get_die_id(i);
+               if (cpus[i].die_id > max_die_id)
+                       max_die_id = cpus[i].die_id;
+
                /* get numa node information */
                cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
                if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
        if (!summary_only && topo.cores_per_node > 1)
                BIC_PRESENT(BIC_Core);
 
+       topo.num_die = max_die_id + 1;
+       if (debug > 1)
+               fprintf(outf, "max_die_id %d, sizing for %d die\n",
+                               max_die_id, topo.num_die);
+       if (!summary_only && topo.num_die > 1)
+               BIC_PRESENT(BIC_Die);
+
        topo.num_packages = max_package_id + 1;
        if (debug > 1)
                fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
                if (cpu_is_not_present(i))
                        continue;
                fprintf(outf,
-                       "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
-                       i, cpus[i].physical_package_id,
+                       "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
+                       i, cpus[i].physical_package_id, cpus[i].die_id,
                        cpus[i].physical_node_id,
                        cpus[i].logical_node_id,
                        cpus[i].physical_core_id,
@@ -5122,7 +5281,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.07.27"
+       fprintf(outf, "turbostat version 19.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5319,7 +5478,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
@@ -5346,7 +5506,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
index bcbd928c96aba4ab3a2a13984b1987f1497a412d..fc818bc1d7294454093a949b3e5f394ca9879982 100644 (file)
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
 };
 
+#define VLAN_HLEN      4
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       struct iphdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v4 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto = __bpf_constant_htons(ETH_P_IP),
+       .iph.ihl = 5,
+       .iph.protocol = IPPROTO_TCP,
+       .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
+       .nhoff = VLAN_HLEN,
+       .thoff = VLAN_HLEN + sizeof(struct iphdr),
+       .addr_proto = ETH_P_IP,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       __u16 vlan_tci2;
+       __u16 vlan_proto2;
+       struct ipv6hdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v6 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
+       .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
+       .iph.nexthdr = IPPROTO_TCP,
+       .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
+       .nhoff = VLAN_HLEN * 2,
+       .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+       .addr_proto = ETH_P_IPV6,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
 void test_flow_dissector(void)
 {
        struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
              err, errno, retval, duration, size, sizeof(flow_keys));
        CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
 
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
+                       pkt_vlan_v4_flow_keys);
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
+                       pkt_vlan_v6_flow_keys);
+
        bpf_object__close(obj);
 }
index 284660f5aa9533aaee034e13242f5ea098729676..75b17cada53937e5e237e00049a1bc22176cb83c 100644 (file)
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
 
-       keys->n_proto = proto;
        switch (proto) {
        case bpf_htons(ETH_P_IP):
                bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 SEC("flow_dissector")
 int _dissect(struct __sk_buff *skb)
 {
-       if (!skb->vlan_present)
-               return parse_eth_proto(skb, skb->protocol);
-       else
-               return parse_eth_proto(skb, skb->vlan_proto);
+       struct bpf_flow_keys *keys = skb->flow_keys;
+
+       return parse_eth_proto(skb, keys->n_proto);
 }
 
 /* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
        struct vlan_hdr *vlan, _vlan;
-       __be16 proto;
-
-       /* Peek back to see if single or double-tagging */
-       if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
-                              sizeof(proto)))
-               return BPF_DROP;
 
        /* Account for double-tagging */
-       if (proto == bpf_htons(ETH_P_8021AD)) {
+       if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
                vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
                if (!vlan)
                        return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
                if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
                        return BPF_DROP;
 
+               keys->nhoff += sizeof(*vlan);
                keys->thoff += sizeof(*vlan);
        }
 
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
        if (!vlan)
                return BPF_DROP;
 
+       keys->nhoff += sizeof(*vlan);
        keys->thoff += sizeof(*vlan);
        /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
        if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
            vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
                return BPF_DROP;
 
+       keys->n_proto = vlan->h_vlan_encapsulated_proto;
        return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
 }
 
index 23e3b314ca603956ce88ed4ac8f1512eeddfa34f..ec5794e4205bc04dabcaa20532cfe4cd2fcf6bbd 100644 (file)
@@ -5776,6 +5776,53 @@ const struct btf_dedup_test dedup_tests[] = {
                .dedup_table_size = 1, /* force hash collisions */
        },
 },
+{
+       .descr = "dedup: void equiv check",
+       /*
+        * // CU 1:
+        * struct s {
+        *      struct {} *x;
+        * };
+        * // CU 2:
+        * struct s {
+        *      int *x;
+        * };
+        */
+       .input = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+               .dedup_table_size = 1, /* force hash collisions */
+       },
+},
 {
        .descr = "dedup: all possible kinds (no duplicates)",
        .input = {
index f2ccae39ee66b32c8b60890dcacff0bd89c8abd0..fb11240b758b1a60f864473d4ee4caa1ff2932a4 100644 (file)
        .errstr = "call stack",
        .result = REJECT,
 },
+{
+       "calls: stack depth check in dead code",
+       .insns = {
+       /* main */
+       BPF_MOV64_IMM(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+       BPF_EXIT_INSN(),
+       /* A */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       /* B */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+       BPF_EXIT_INSN(),
+       /* C */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+       BPF_EXIT_INSN(),
+       /* D */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+       BPF_EXIT_INSN(),
+       /* E */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+       BPF_EXIT_INSN(),
+       /* F */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+       BPF_EXIT_INSN(),
+       /* G */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+       BPF_EXIT_INSN(),
+       /* H */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .errstr = "call stack",
+       .result = REJECT,
+},
 {
        "calls: spill into caller stack frame",
        .insns = {
index 27f0acaed880e765e9829306b1cc3a28e2755cbb..ddabb160a11bacc151b9891f14f7da62846b8ed1 100644 (file)
             "$TC actions flush action sample"
         ]
     },
+    {
+        "id": "7571",
+        "name": "Add sample action with invalid rate",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action sample index 2",
+        "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
+    },
     {
         "id": "b6d4",
         "name": "Add sample action with mandatory arguments and invalid control action",