git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
author David S. Miller <davem@davemloft.net>
Mon, 16 May 2016 17:56:37 +0000 (13:56 -0400)
committer David S. Miller <davem@davemloft.net>
Mon, 16 May 2016 17:56:37 +0000 (13:56 -0400)
Johan Hedberg says:

====================
pull request: bluetooth-next 2016-05-14

Here are two more Bluetooth patches for the 4.7 kernel which we wanted
to get into net-next before the merge window opens. Please let me know
if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
254 files changed:
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/regmap/regmap.txt
Documentation/sysctl/kernel.txt
Documentation/sysctl/net.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/net/bpf_jit_comp.c
arch/mips/Kconfig
arch/powerpc/Kconfig
arch/s390/Kconfig
arch/s390/net/bpf_jit_comp.c
arch/sparc/Kconfig
arch/x86/Kconfig
arch/x86/configs/kvm_guest.config
arch/x86/events/intel/core.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/msr.c
arch/x86/include/asm/uaccess.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/emulate.c
arch/x86/net/bpf_jit_comp.c
block/blk-map.c
crypto/testmgr.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap-spmi.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/input/misc/max8997_haptic.c
drivers/input/misc/twl6040-vibra.c
drivers/input/mouse/byd.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/ezchip/nps_enet.h
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/phy.h
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_mbx.c
drivers/net/ethernet/intel/igb/e1000_nvm.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/defines.h
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/wiznet/w5100-spi.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5100.h
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/phy/lxt.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/xen-netback/Makefile
drivers/net/xen-netback/common.h
drivers/net/xen-netback/hash.c [new file with mode: 0644]
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/nvdimm/pmem.c
drivers/pci/bus.c
drivers/pinctrl/pinctrl-at91-pio4.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/gpio-regulator.c
drivers/regulator/s2mps11.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/qla1280.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-ti-qspi.c
fs/ecryptfs/file.c
fs/isofs/rock.c
fs/kernfs/dir.c
fs/kernfs/mount.c
fs/namei.c
fs/ocfs2/acl.c
fs/ocfs2/acl.h
fs/ocfs2/file.c
fs/ocfs2/namei.c
fs/ocfs2/refcounttree.c
fs/ocfs2/xattr.c
fs/ocfs2/xattr.h
fs/open.c
fs/overlayfs/super.c
fs/proc/base.c
fs/splice.c
include/linux/dcache.h
include/linux/filter.h
include/linux/kernfs.h
include/linux/mfd/samsung/s2mps11.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/namei.h
include/linux/netdevice.h
include/linux/swap.h
include/linux/uio.h
include/net/act_api.h
include/net/netlink.h
include/net/pkt_cls.h
include/net/sch_generic.h
include/uapi/linux/if.h
include/uapi/linux/libc-compat.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/tc_act/Kbuild
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/workqueue.c
lib/asn1_decoder.c
lib/iov_iter.c
lib/test_bpf.c
mm/huge_memory.c
mm/ksm.c
mm/memory.c
mm/swapfile.c
mm/zsmalloc.c
net/Kconfig
net/core/filter.c
net/core/sysctl_net_core.c
net/ipv4/fib_semantics.c
net/ipv4/ip_gre.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/datagram.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nfnetlink_acct.c
net/netfilter/xt_IDLETIMER.c
net/openvswitch/conntrack.c
net/qrtr/Kconfig
net/qrtr/Makefile
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_vlan.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/switchdev/Kconfig
net/x25/x25_facilities.c
sound/pci/hda/hda_sysfs.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-dwarf_getlocations.c [new file with mode: 0644]
tools/lib/traceevent/parse-filter.c
tools/perf/arch/x86/util/dwarf-regs.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/config/Makefile
tools/perf/util/dwarf-aux.c
tools/perf/util/event.c
tools/perf/util/evsel.c
tools/perf/util/parse-events.c
tools/perf/util/sort.c
tools/perf/util/thread_map.c

index bc1c3c8bf8fa37fa7e08dcabc65f1752a8a79749..c00a9a8945478272bea7feab230b9cc7178c09d3 100644 (file)
@@ -35,6 +35,8 @@ Optional Properties:
 - broken-turn-around: If set, indicates the PHY device does not correctly
   release the turn around line low at the end of a MDIO transaction.
 
+- reset-gpios: Reference to a GPIO used to reset the phy.
+
 Example:
 
 ethernet-phy@0 {
@@ -42,4 +44,5 @@ ethernet-phy@0 {
        interrupt-parent = <40000>;
        interrupts = <35 1>;
        reg = <0>;
+       reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
 };
index e98a9652ccc8c4d3a2263fe5a67b9064b27d1f04..0127be360fe852d70a853026c03de1fcc8c8a7a0 100644 (file)
@@ -1,50 +1,29 @@
-Device-Tree binding for regmap
-
-The endianness mode of CPU & Device scenarios:
-Index     Device     Endianness properties
----------------------------------------------------
-1         BE         'big-endian'
-2         LE         'little-endian'
-3        Native     'native-endian'
-
-For one device driver, which will run in different scenarios above
-on different SoCs using the devicetree, we need one way to simplify
-this.
+Devicetree binding for regmap
 
 Optional properties:
-- {big,little,native}-endian: these are boolean properties, if absent
-  then the implementation will choose a default based on the device
-  being controlled.  These properties are for register values and all
-  the buffers only.  Native endian means that the CPU and device have
-  the same endianness.
 
-Examples:
-Scenario 1 : CPU in LE mode & device in LE mode.
-dev: dev@40031000 {
-             compatible = "name";
-             reg = <0x40031000 0x1000>;
-             ...
-};
+   little-endian,
+   big-endian,
+   native-endian:      See common-properties.txt for a definition
 
-Scenario 2 : CPU in LE mode & device in BE mode.
-dev: dev@40031000 {
-             compatible = "name";
-             reg = <0x40031000 0x1000>;
-             ...
-             big-endian;
-};
+Note:
+Regmap defaults to little-endian register access on MMIO based
+devices, this is by far the most common setting. On CPU
+architectures that typically run big-endian operating systems
+(e.g. PowerPC), registers can be defined as big-endian and must
+be marked that way in the devicetree.
 
-Scenario 3 : CPU in BE mode & device in BE mode.
-dev: dev@40031000 {
-             compatible = "name";
-             reg = <0x40031000 0x1000>;
-             ...
-};
+On SoCs that can be operated in both big-endian and little-endian
+modes, with a single hardware switch controlling both the endianess
+of the CPU and a byteswap for MMIO registers (e.g. many Broadcom MIPS
+chips), "native-endian" is used to allow using the same device tree
+blob in both cases.
 
-Scenario 4 : CPU in BE mode & device in LE mode.
+Examples:
+Scenario 1 : a register set in big-endian mode.
 dev: dev@40031000 {
-             compatible = "name";
+             compatible = "syscon";
              reg = <0x40031000 0x1000>;
+             big-endian;
              ...
-             little-endian;
 };
index 57653a44b128c821520b071f0ec4b53bfacc7b96..fcddfd5ded999a3ac8d5624f150439c402e8f0ed 100644 (file)
@@ -645,7 +645,7 @@ allowed to execute.
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 1.
+users (without CAP_SYS_ADMIN).  The default value is 2.
 
  -1: Allow use of (almost) all events by all users
 >=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
index 809ab6efcc744ec3dcad28bb075d696e6613f836..f0480f7ea7404c777c1a8943047278d6765a4fe6 100644 (file)
@@ -43,6 +43,17 @@ Values :
        1 - enable the JIT
        2 - enable the JIT and ask the compiler to emit traces on kernel log.
 
+bpf_jit_harden
+--------------
+
+This enables hardening for the Berkeley Packet Filter Just in Time compiler.
+Supported are eBPF JIT backends. Enabling hardening trades off performance,
+but can mitigate JIT spraying.
+Values :
+       0 - disable JIT hardening (default value)
+       1 - enable JIT hardening for unprivileged users only
+       2 - enable JIT hardening for all users
+
 dev_weight
 --------------
 
index b57df66532d2828a5e42ccfd7284511726e68334..458304bfe2d5c91a1979720c8945d4f32e3d057e 100644 (file)
@@ -11315,6 +11315,20 @@ F:     include/trace/
 F:     kernel/trace/
 F:     tools/testing/selftests/ftrace/
 
+TRACING MMIO ACCESSES (MMIOTRACE)
+M:     Steven Rostedt <rostedt@goodmis.org>
+M:     Ingo Molnar <mingo@kernel.org>
+R:     Karol Herbst <karolherbst@gmail.com>
+R:     Pekka Paalanen <ppaalanen@gmail.com>
+S:     Maintained
+L:     linux-kernel@vger.kernel.org
+L:     nouveau@lists.freedesktop.org
+F:     kernel/trace/trace_mmiotrace.c
+F:     include/linux/mmiotrace.h
+F:     arch/x86/mm/kmmio.c
+F:     arch/x86/mm/mmio-mod.c
+F:     arch/x86/mm/testmmiotrace.c
+
 TRIVIAL PATCHES
 M:     Jiri Kosina <trivial@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
index cdfa6c2b7626f3a761364ac072fc359b5f02592a..2315b0d1b4f40ff2cd09a579d8a6f33eab492a77 100644 (file)
@@ -41,7 +41,7 @@ config ARM
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARM_SMCCC if CPU_V7
-       select HAVE_BPF_JIT
+       select HAVE_CBPF_JIT
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CONTEXT_TRACKING
        select HAVE_C_RECORDMCOUNT
index 0827d594b1f0ef3750146690c5ce11babf1fc65b..cd0cd5fd09a33bdf17b3f609d1e3497b446c9412 100644 (file)
 
                        pmc: pmc@fffffc00 {
                                compatible = "atmel,at91sam9x5-pmc", "syscon";
-                               reg = <0xfffffc00 0x100>;
+                               reg = <0xfffffc00 0x200>;
                                interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
                                interrupt-controller;
                                #address-cells = <1>;
index 78996bdbd3df38c30fb0a0051cd0db3e9a9249eb..9817090c1b731540a2dcd4cb3dc480608040fbfa 100644 (file)
                        status = "disabled";
 
                        nfc@c0000000 {
-                               compatible = "atmel,sama5d4-nfc";
+                               compatible = "atmel,sama5d3-nfc";
                                #address-cells = <1>;
                                #size-cells = <1>;
                                reg = < /* NFC Command Registers */
index 4f436220384f847bbdee53cd7a8bce5fa089da61..e6761ea2feec1b4a4ca5c6580dd8f1cffbd036ba 100644 (file)
@@ -58,7 +58,7 @@ config ARM64
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_BPF_JIT
+       select HAVE_EBPF_JIT
        select HAVE_C_RECORDMCOUNT
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CMPXCHG_DOUBLE
index efa77c146415b64d774a9811d30268e690d354d1..521b1ec5915759f61e0ef1ab6e8328ca0cd62256 100644 (file)
@@ -2,6 +2,7 @@ menu "Platform selection"
 
 config ARCH_SUNXI
        bool "Allwinner sunxi 64-bit SoC Family"
+       select GENERIC_IRQ_CHIP
        help
          This enables support for Allwinner sunxi based SoCs like the A64.
 
index a34420a5df9a2e5134beb26964f08c57d97c0d16..d0d51903c7e0e40a8ea5668559aa3a583ed33302 100644 (file)
@@ -31,8 +31,8 @@
 
 int bpf_jit_enable __read_mostly;
 
-#define TMP_REG_1 (MAX_BPF_REG + 0)
-#define TMP_REG_2 (MAX_BPF_REG + 1)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -54,6 +54,8 @@ static const int bpf2a64[] = {
        /* temporary register for internal BPF JIT */
        [TMP_REG_1] = A64_R(23),
        [TMP_REG_2] = A64_R(24),
+       /* temporary register for blinding constants */
+       [BPF_REG_AX] = A64_R(9),
 };
 
 struct jit_ctx {
@@ -476,6 +478,7 @@ emit_cond_jmp:
                case BPF_JGE:
                        jmp_cond = A64_COND_CS;
                        break;
+               case BPF_JSET:
                case BPF_JNE:
                        jmp_cond = A64_COND_NE;
                        break;
@@ -761,31 +764,45 @@ void bpf_jit_compile(struct bpf_prog *prog)
        /* Nothing to do here. We support Internal BPF. */
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+       struct bpf_prog *tmp, *orig_prog = prog;
        struct bpf_binary_header *header;
+       bool tmp_blinded = false;
        struct jit_ctx ctx;
        int image_size;
        u8 *image_ptr;
 
        if (!bpf_jit_enable)
-               return;
+               return orig_prog;
 
-       if (!prog || !prog->len)
-               return;
+       tmp = bpf_jit_blind_constants(prog);
+       /* If blinding was requested and we failed during blinding,
+        * we must fall back to the interpreter.
+        */
+       if (IS_ERR(tmp))
+               return orig_prog;
+       if (tmp != prog) {
+               tmp_blinded = true;
+               prog = tmp;
+       }
 
        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;
 
        ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
-       if (ctx.offset == NULL)
-               return;
+       if (ctx.offset == NULL) {
+               prog = orig_prog;
+               goto out;
+       }
 
        /* 1. Initial fake pass to compute ctx->idx. */
 
        /* Fake pass to fill in ctx->offset and ctx->tmp_used. */
-       if (build_body(&ctx))
-               goto out;
+       if (build_body(&ctx)) {
+               prog = orig_prog;
+               goto out_off;
+       }
 
        build_prologue(&ctx);
 
@@ -796,8 +813,10 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        image_size = sizeof(u32) * ctx.idx;
        header = bpf_jit_binary_alloc(image_size, &image_ptr,
                                      sizeof(u32), jit_fill_hole);
-       if (header == NULL)
-               goto out;
+       if (header == NULL) {
+               prog = orig_prog;
+               goto out_off;
+       }
 
        /* 2. Now, the actual pass. */
 
@@ -808,7 +827,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
        if (build_body(&ctx)) {
                bpf_jit_binary_free(header);
-               goto out;
+               prog = orig_prog;
+               goto out_off;
        }
 
        build_epilogue(&ctx);
@@ -816,7 +836,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        /* 3. Extra pass to validate JITed code. */
        if (validate_code(&ctx)) {
                bpf_jit_binary_free(header);
-               goto out;
+               prog = orig_prog;
+               goto out_off;
        }
 
        /* And we're done. */
@@ -828,8 +849,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        set_memory_ro((unsigned long)header, header->pages);
        prog->bpf_func = (void *)ctx.image;
        prog->jited = 1;
-out:
+
+out_off:
        kfree(ctx.offset);
+out:
+       if (tmp_blinded)
+               bpf_jit_prog_release_other(prog, prog == orig_prog ?
+                                          tmp : orig_prog);
+       return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
index 2018c2b0e078f9880bca0d2bb497e2dd271196d5..3ee1ea61b2dc7b571efe16bc66b133eddfdfed62 100644 (file)
@@ -15,7 +15,7 @@ config MIPS
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_BPF_JIT if !CPU_MICROMIPS
+       select HAVE_CBPF_JIT if !CPU_MICROMIPS
        select HAVE_FUNCTION_TRACER
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
index 7cd32c03828605c47f6a43949f3a246bcc4572d1..2fdb73d9198a0905f40ba06b543a86ec9ca6d9ec 100644 (file)
@@ -126,7 +126,7 @@ config PPC
        select IRQ_FORCED_THREADING
        select HAVE_RCU_TABLE_FREE if SMP
        select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_BPF_JIT
+       select HAVE_CBPF_JIT
        select HAVE_ARCH_JUMP_LABEL
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_HAS_GCOV_PROFILE_ALL
index bf24ab1889215abed3b8759dae2d45177ab960ab..a883981c0174a3ca3b14a7120166b920dc299504 100644 (file)
@@ -126,7 +126,7 @@ config S390
        select HAVE_ARCH_SOFT_DIRTY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-       select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
+       select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
        select HAVE_DEBUG_KMEMLEAK
index 3c0bfc1f26941dde063cff501d4f1b24b8e72f5f..9133b0ec000b82037729ac8ff7e4912b252b13a2 100644 (file)
@@ -54,16 +54,17 @@ struct bpf_jit {
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
 #define SEEN_SKB_CHANGE        64      /* code changes skb data */
+#define SEEN_REG_AX    128     /* code uses constant blinding */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
  * s390 registers
  */
-#define REG_W0         (__MAX_BPF_REG+0)       /* Work register 1 (even) */
-#define REG_W1         (__MAX_BPF_REG+1)       /* Work register 2 (odd) */
-#define REG_SKB_DATA   (__MAX_BPF_REG+2)       /* SKB data register */
-#define REG_L          (__MAX_BPF_REG+3)       /* Literal pool register */
-#define REG_15         (__MAX_BPF_REG+4)       /* Register 15 */
+#define REG_W0         (MAX_BPF_JIT_REG + 0)   /* Work register 1 (even) */
+#define REG_W1         (MAX_BPF_JIT_REG + 1)   /* Work register 2 (odd) */
+#define REG_SKB_DATA   (MAX_BPF_JIT_REG + 2)   /* SKB data register */
+#define REG_L          (MAX_BPF_JIT_REG + 3)   /* Literal pool register */
+#define REG_15         (MAX_BPF_JIT_REG + 4)   /* Register 15 */
 #define REG_0          REG_W0                  /* Register 0 */
 #define REG_1          REG_W1                  /* Register 1 */
 #define REG_2          BPF_REG_1               /* Register 2 */
@@ -88,6 +89,8 @@ static const int reg2hex[] = {
        [BPF_REG_9]     = 10,
        /* BPF stack pointer */
        [BPF_REG_FP]    = 13,
+       /* Register for blinding (shared with REG_SKB_DATA) */
+       [BPF_REG_AX]    = 12,
        /* SKB data pointer */
        [REG_SKB_DATA]  = 12,
        /* Work registers for s390x backend */
@@ -385,7 +388,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
 /*
  * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
  * we store the SKB header length on the stack and the SKB data
- * pointer in REG_SKB_DATA.
+ * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
  */
 static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 {
@@ -397,9 +400,10 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
                   offsetof(struct sk_buff, data_len));
        /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
-       /* lg %skb_data,data_off(%b1) */
-       EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-                     BPF_REG_1, offsetof(struct sk_buff, data));
+       if (!(jit->seen & SEEN_REG_AX))
+               /* lg %skb_data,data_off(%b1) */
+               EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+                             BPF_REG_1, offsetof(struct sk_buff, data));
 }
 
 /*
@@ -487,6 +491,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
        s32 imm = insn->imm;
        s16 off = insn->off;
 
+       if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+               jit->seen |= SEEN_REG_AX;
        switch (insn->code) {
        /*
         * BPF_MOV
@@ -1188,7 +1194,7 @@ call_fn:
                /*
                 * Implicit input:
                 *  BPF_REG_6    (R7) : skb pointer
-                *  REG_SKB_DATA (R12): skb data pointer
+                *  REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
                 *
                 * Calculated input:
                 *  BPF_REG_2    (R3) : offset of byte(s) to fetch in skb
@@ -1209,6 +1215,11 @@ call_fn:
                        /* agfr %b2,%src (%src is s32 here) */
                        EMIT4(0xb9180000, BPF_REG_2, src_reg);
 
+               /* Reload REG_SKB_DATA if BPF_REG_AX is used */
+               if (jit->seen & SEEN_REG_AX)
+                       /* lg %skb_data,data_off(%b6) */
+                       EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+                                     BPF_REG_6, offsetof(struct sk_buff, data));
                /* basr %b5,%w1 (%b5 is call saved) */
                EMIT2(0x0d00, BPF_REG_5, REG_W1);
 
@@ -1262,37 +1273,62 @@ void bpf_jit_compile(struct bpf_prog *fp)
 /*
  * Compile eBPF program "fp"
  */
-void bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
+       struct bpf_prog *tmp, *orig_fp = fp;
        struct bpf_binary_header *header;
+       bool tmp_blinded = false;
        struct bpf_jit jit;
        int pass;
 
        if (!bpf_jit_enable)
-               return;
+               return orig_fp;
+
+       tmp = bpf_jit_blind_constants(fp);
+       /*
+        * If blinding was requested and we failed during blinding,
+        * we must fall back to the interpreter.
+        */
+       if (IS_ERR(tmp))
+               return orig_fp;
+       if (tmp != fp) {
+               tmp_blinded = true;
+               fp = tmp;
+       }
+
        memset(&jit, 0, sizeof(jit));
        jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
-       if (jit.addrs == NULL)
-               return;
+       if (jit.addrs == NULL) {
+               fp = orig_fp;
+               goto out;
+       }
        /*
         * Three initial passes:
         *   - 1/2: Determine clobbered registers
         *   - 3:   Calculate program size and addrs arrray
         */
        for (pass = 1; pass <= 3; pass++) {
-               if (bpf_jit_prog(&jit, fp))
+               if (bpf_jit_prog(&jit, fp)) {
+                       fp = orig_fp;
                        goto free_addrs;
+               }
        }
        /*
         * Final pass: Allocate and generate program
         */
-       if (jit.size >= BPF_SIZE_MAX)
+       if (jit.size >= BPF_SIZE_MAX) {
+               fp = orig_fp;
                goto free_addrs;
+       }
        header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
-       if (!header)
+       if (!header) {
+               fp = orig_fp;
                goto free_addrs;
-       if (bpf_jit_prog(&jit, fp))
+       }
+       if (bpf_jit_prog(&jit, fp)) {
+               fp = orig_fp;
                goto free_addrs;
+       }
        if (bpf_jit_enable > 1) {
                bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
                if (jit.prg_buf)
@@ -1305,6 +1341,11 @@ void bpf_int_jit_compile(struct bpf_prog *fp)
        }
 free_addrs:
        kfree(jit.addrs);
+out:
+       if (tmp_blinded)
+               bpf_jit_prog_release_other(fp, fp == orig_fp ?
+                                          tmp : orig_fp);
+       return fp;
 }
 
 /*
index 57ffaf285c2f511e1ee1549e8e002fa2f1e5d22c..d5003812c7485b3b35a2bfc53865bde047b1e38b 100644 (file)
@@ -32,7 +32,7 @@ config SPARC
        select ARCH_WANT_IPC_PARSE_VERSION
        select GENERIC_PCI_IOMAP
        select HAVE_NMI_WATCHDOG if SPARC64
-       select HAVE_BPF_JIT
+       select HAVE_CBPF_JIT
        select HAVE_DEBUG_BUGVERBOSE
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_CLOCKEVENTS
index 2dc18605831f6e88fd45c2863fc3d1fc7e6b6622..ae83046d51a8e08ab020b285c368390cc97a2003 100644 (file)
@@ -91,7 +91,7 @@ config X86
        select HAVE_ARCH_SOFT_DIRTY             if X86_64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-       select HAVE_BPF_JIT                     if X86_64
+       select HAVE_EBPF_JIT                    if X86_64
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
index f9affcc3b9f134939c606c880be807785fd7a23e..9906505c998aac2e0c5b07736acc0798555ad3cd 100644 (file)
@@ -26,3 +26,6 @@ CONFIG_VIRTIO_NET=y
 CONFIG_9P_FS=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_INPUT=y
index a6fd4dbcf820abf727b6118c0084a6877ec0340d..5210eaa4aa629daa19424456d759b760d7d0a43a 100644 (file)
@@ -3708,7 +3708,7 @@ __init int intel_pmu_init(void)
                                c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
                        }
                        c->idxmsk64 &=
-                               ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+                               ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
                        c->weight = hweight64(c->idxmsk64);
                }
        }
index 09a77dbc73c93110a40d2afbf2dedc86e50ea44f..7377814de30b6aaba0478bc91bf9156b5be6322b 100644 (file)
@@ -709,6 +709,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 
        /* clear STOP and INT from current entry */
        buf->topa_index[buf->stop_pos]->stop = 0;
+       buf->topa_index[buf->stop_pos]->intr = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;
 
        /* how many pages till the STOP marker */
@@ -733,6 +734,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
        buf->intr_pos = idx;
 
        buf->topa_index[buf->stop_pos]->stop = 1;
+       buf->topa_index[buf->stop_pos]->intr = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;
 
        return 0;
index ab2bcaaebe38d464ab7863c901ac41fc7b847711..b2625867ebd17543401cf6dbc5bf9e76fcd6ee6c 100644 (file)
 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID                0x1ff
 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE      (7 << 18)
 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP         (0xfffffe2aULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE        (0x1ULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC                (0x1ULL << 37)
 
 /* KNL EDC/MC UCLK */
 #define KNL_UCLK_MSR_PMON_CTR0_LOW             0x400
@@ -1902,6 +1905,10 @@ static int knl_cha_hw_config(struct intel_uncore_box *box,
                reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
                            KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
+
+               reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
+               reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
+               reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
                reg1->idx = idx;
        }
        return 0;
index ec863b9a9f780c7507634353d64f9c2f76f1a0e1..8bef19f098d4d065a48a5d3e4ecc36b8158fc0ac 100644 (file)
@@ -166,7 +166,7 @@ again:
        if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
                delta = sign_extend64(delta, 31);
 
-       local64_add(now - prev, &event->count);
+       local64_add(delta, &event->count);
 }
 
 static void msr_event_start(struct perf_event *event, int flags)
index a969ae607be8323578865285b27cb443451b4483..2e7513d1f1f45eb3bb7ec4ebe459aec2fa76b46d 100644 (file)
@@ -108,6 +108,14 @@ struct exception_table_entry {
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+#define swap_ex_entry_fixup(a, b, tmp, delta)                  \
+       do {                                                    \
+               (a)->fixup = (b)->fixup + (delta);              \
+               (b)->fixup = (tmp).fixup - (delta);             \
+               (a)->handler = (b)->handler + (delta);          \
+               (b)->handler = (tmp).handler - (delta);         \
+       } while (0)
+
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern int early_fixup_exception(unsigned long *ip);
index 1f7fdb91a818bc10d4b975a070d6b2c1a947b15b..e4393bfc7f0d9bb39520adceeae2968652779a5c 100644 (file)
@@ -336,7 +336,7 @@ static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
        unsigned int eax, ebx, ecx, edx;
 
-       if (c->cpuid_level < 4)
+       if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
                return 1;
 
        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
index a2065d3b3b396f4503f4e4f42acc2af2bd4b307b..0e4329ed91ef61da5a86e5d604d1ad96109efbfb 100644 (file)
@@ -332,6 +332,11 @@ static void __init smp_init_package_map(void)
         * primary cores.
         */
        ncpus = boot_cpu_data.x86_max_cores;
+       if (!ncpus) {
+               pr_warn("x86_max_cores == zero !?!?");
+               ncpus = 1;
+       }
+
        __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 
        /*
index 0f6294376fbdcaa2863956822583d09108cecada..a2f24af3c999ca5f14c553bf5e9875725c47668a 100644 (file)
@@ -5110,13 +5110,17 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
+       register void *__sp asm(_ASM_SP);
        ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
+
        if (!(ctxt->d & ByteOp))
                fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
+
        asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
            : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-             [fastop]"+S"(fop)
+             [fastop]"+S"(fop), "+r"(__sp)
            : "c"(ctxt->src2.val));
+
        ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
        if (!fop) /* exception is returned in fop variable */
                return emulate_de(ctxt);
index 4286f3618bd07c32bba605874c4f9915be47820e..fe04a04dab8ec0df10c827623577102a994df71c 100644 (file)
@@ -110,11 +110,16 @@ static void bpf_flush_icache(void *start, void *end)
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* pick a register outside of BPF range for JIT internal work */
-#define AUX_REG (MAX_BPF_REG + 1)
+#define AUX_REG (MAX_BPF_JIT_REG + 1)
 
-/* the following table maps BPF registers to x64 registers.
- * x64 register r12 is unused, since if used as base address register
- * in load/store instructions, it always needs an extra byte of encoding
+/* The following table maps BPF registers to x64 registers.
+ *
+ * x64 register r12 is unused, since if used as base address
+ * register in load/store instructions, it always needs an
+ * extra byte of encoding and is callee saved.
+ *
+ *  r9 caches skb->len - skb->data_len
+ * r10 caches skb->data, and used for blinding (if enabled)
  */
 static const int reg2hex[] = {
        [BPF_REG_0] = 0,  /* rax */
@@ -128,6 +133,7 @@ static const int reg2hex[] = {
        [BPF_REG_8] = 6,  /* r14 callee saved */
        [BPF_REG_9] = 7,  /* r15 callee saved */
        [BPF_REG_FP] = 5, /* rbp readonly */
+       [BPF_REG_AX] = 2, /* r10 temp register */
        [AUX_REG] = 3,    /* r11 temp register */
 };
 
@@ -141,7 +147,8 @@ static bool is_ereg(u32 reg)
                             BIT(AUX_REG) |
                             BIT(BPF_REG_7) |
                             BIT(BPF_REG_8) |
-                            BIT(BPF_REG_9));
+                            BIT(BPF_REG_9) |
+                            BIT(BPF_REG_AX));
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
@@ -182,6 +189,7 @@ static void jit_fill_hole(void *area, unsigned int size)
 struct jit_context {
        int cleanup_addr; /* epilogue code offset */
        bool seen_ld_abs;
+       bool seen_ax_reg;
 };
 
 /* maximum number of bytes emitted while JITing one eBPF insn */
@@ -345,6 +353,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
        struct bpf_insn *insn = bpf_prog->insnsi;
        int insn_cnt = bpf_prog->len;
        bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+       bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
        bool seen_exit = false;
        u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
        int i, cnt = 0;
@@ -367,6 +376,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                int ilen;
                u8 *func;
 
+               if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+                       ctx->seen_ax_reg = seen_ax_reg = true;
+
                switch (insn->code) {
                        /* ALU */
                case BPF_ALU | BPF_ADD | BPF_X:
@@ -1002,6 +1014,10 @@ common_load:
                         * sk_load_* helpers also use %r10 and %r9d.
                         * See bpf_jit.S
                         */
+                       if (seen_ax_reg)
+                               /* r10 = skb->data, mov %r10, off32(%rbx) */
+                               EMIT3_off32(0x4c, 0x8b, 0x93,
+                                           offsetof(struct sk_buff, data));
                        EMIT1_off32(0xE8, jmp_offset); /* call */
                        break;
 
@@ -1073,25 +1089,37 @@ void bpf_jit_compile(struct bpf_prog *prog)
 {
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_binary_header *header = NULL;
+       struct bpf_prog *tmp, *orig_prog = prog;
        int proglen, oldproglen = 0;
        struct jit_context ctx = {};
+       bool tmp_blinded = false;
        u8 *image = NULL;
        int *addrs;
        int pass;
        int i;
 
        if (!bpf_jit_enable)
-               return;
+               return orig_prog;
 
-       if (!prog || !prog->len)
-               return;
+       tmp = bpf_jit_blind_constants(prog);
+       /* If blinding was requested and we failed during blinding,
+        * we must fall back to the interpreter.
+        */
+       if (IS_ERR(tmp))
+               return orig_prog;
+       if (tmp != prog) {
+               tmp_blinded = true;
+               prog = tmp;
+       }
 
        addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
-       if (!addrs)
-               return;
+       if (!addrs) {
+               prog = orig_prog;
+               goto out;
+       }
 
        /* Before first pass, make a rough estimation of addrs[]
         * each bpf instruction is translated to less than 64 bytes
@@ -1113,21 +1141,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
                        image = NULL;
                        if (header)
                                bpf_jit_binary_free(header);
-                       goto out;
+                       prog = orig_prog;
+                       goto out_addrs;
                }
                if (image) {
                        if (proglen != oldproglen) {
                                pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
                                       proglen, oldproglen);
-                               goto out;
+                               prog = orig_prog;
+                               goto out_addrs;
                        }
                        break;
                }
                if (proglen == oldproglen) {
                        header = bpf_jit_binary_alloc(proglen, &image,
                                                      1, jit_fill_hole);
-                       if (!header)
-                               goto out;
+                       if (!header) {
+                               prog = orig_prog;
+                               goto out_addrs;
+                       }
                }
                oldproglen = proglen;
        }
@@ -1141,8 +1173,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
                prog->bpf_func = (void *)image;
                prog->jited = 1;
        }
-out:
+
+out_addrs:
        kfree(addrs);
+out:
+       if (tmp_blinded)
+               bpf_jit_prog_release_other(prog, prog == orig_prog ?
+                                          tmp : orig_prog);
+       return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *fp)
index a54f0543b956e5ccf5f20206e7983fe66ce2698d..b9f88b7751fbd87742b1d1439a1d89c97818f9ce 100644 (file)
@@ -9,24 +9,6 @@
 
 #include "blk.h"
 
-static bool iovec_gap_to_prv(struct request_queue *q,
-                            struct iovec *prv, struct iovec *cur)
-{
-       unsigned long prev_end;
-
-       if (!queue_virt_boundary(q))
-               return false;
-
-       if (prv->iov_base == NULL && prv->iov_len == 0)
-               /* prv is not set - don't check */
-               return false;
-
-       prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
-       return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
-               prev_end & queue_virt_boundary(q));
-}
-
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
 {
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
 {
-       struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
-       bool copy = (q->dma_pad_mask & iter->count) || map_data;
+       bool copy = false;
+       unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret;
 
-       if (!iter || !iter->count)
-               return -EINVAL;
-
-       iov_for_each(iov, i, *iter) {
-               unsigned long uaddr = (unsigned long) iov.iov_base;
-
-               if (!iov.iov_len)
-                       return -EINVAL;
-
-               /*
-                * Keep going so we check length of all segments
-                */
-               if ((uaddr & queue_dma_alignment(q)) ||
-                   iovec_gap_to_prv(q, &prv, &iov))
-                       copy = true;
-
-               prv.iov_base = iov.iov_base;
-               prv.iov_len = iov.iov_len;
-       }
+       if (map_data)
+               copy = true;
+       else if (iov_iter_alignment(iter) & align)
+               copy = true;
+       else if (queue_virt_boundary(q))
+               copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
        i = *iter;
        do {
index b86883aedca11a617ad2e19bafe646bfecac583a..7d4acc4492338921dd1307c1cc4d4a252130b36a 100644 (file)
@@ -1776,6 +1776,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
 static int do_test_rsa(struct crypto_akcipher *tfm,
                       struct akcipher_testvec *vecs)
 {
+       char *xbuf[XBUFSIZE];
        struct akcipher_request *req;
        void *outbuf_enc = NULL;
        void *outbuf_dec = NULL;
@@ -1784,9 +1785,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
        int err = -ENOMEM;
        struct scatterlist src, dst, src_tab[2];
 
+       if (testmgr_alloc_buf(xbuf))
+               return err;
+
        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
-               return err;
+               goto free_xbuf;
 
        init_completion(&result.completion);
 
@@ -1804,9 +1808,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
        if (!outbuf_enc)
                goto free_req;
 
+       if (WARN_ON(vecs->m_size > PAGE_SIZE))
+               goto free_all;
+
+       memcpy(xbuf[0], vecs->m, vecs->m_size);
+
        sg_init_table(src_tab, 2);
-       sg_set_buf(&src_tab[0], vecs->m, 8);
-       sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
+       sg_set_buf(&src_tab[0], xbuf[0], 8);
+       sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
        sg_init_one(&dst, outbuf_enc, out_len_max);
        akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
                                   out_len_max);
@@ -1825,7 +1834,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
                goto free_all;
        }
        /* verify that encrypted message is equal to expected */
-       if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
+       if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
                pr_err("alg: rsa: encrypt test failed. Invalid output\n");
                err = -EINVAL;
                goto free_all;
@@ -1840,7 +1849,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
                err = -ENOMEM;
                goto free_all;
        }
-       sg_init_one(&src, vecs->c, vecs->c_size);
+
+       if (WARN_ON(vecs->c_size > PAGE_SIZE))
+               goto free_all;
+
+       memcpy(xbuf[0], vecs->c, vecs->c_size);
+
+       sg_init_one(&src, xbuf[0], vecs->c_size);
        sg_init_one(&dst, outbuf_dec, out_len_max);
        init_completion(&result.completion);
        akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@@ -1867,6 +1882,8 @@ free_all:
        kfree(outbuf_enc);
 free_req:
        akcipher_request_free(req);
+free_xbuf:
+       testmgr_free_buf(xbuf);
        return err;
 }
 
index 5c79526245c2e3309a28766b781a9f2bccc850d5..a0380338946a1dbd6cb7e54ab70b001edc77eef7 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _REGMAP_INTERNAL_H
 #define _REGMAP_INTERNAL_H
 
+#include <linux/device.h>
 #include <linux/regmap.h>
 #include <linux/fs.h>
 #include <linux/list.h>
index 7526906ca080f81dcff1499b7e57c2ff0b79569e..5189fd6182f6c6126b21b1d6b3e25f760d4df290 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
+#include "internal.h"
+
 struct regmap_mmio_context {
        void __iomem *regs;
        unsigned val_bytes;
@@ -212,6 +214,7 @@ static const struct regmap_bus regmap_mmio = {
        .reg_write = regmap_mmio_write,
        .reg_read = regmap_mmio_read,
        .free_context = regmap_mmio_free_context,
+       .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -245,7 +248,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
        ctx->val_bytes = config->val_bits / 8;
        ctx->clk = ERR_PTR(-ENODEV);
 
-       switch (config->reg_format_endian) {
+       switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
        case REGMAP_ENDIAN_DEFAULT:
        case REGMAP_ENDIAN_LITTLE:
 #ifdef __LITTLE_ENDIAN
index 7e58f656039900e633729828f5eae5fb1700b93a..4a36e415e938560ce2e3ba927b81888444767a7d 100644 (file)
@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
        while (val_size) {
                len = min_t(size_t, val_size, 8);
 
-               err = spmi_ext_register_readl(context, addr, val, val_size);
+               err = spmi_ext_register_readl(context, addr, val, len);
                if (err)
                        goto err_out;
 
index bf731e9f643e9ecddd367034179fae6a40d294ce..7f85c2c1d68156a4d91bd4b18332df6f1886fb7c 100644 (file)
@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
                        }
                }
        } else {
-               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-                       for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
                                max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
                                if (max_pix_clock >= pix_clock) {
                                        *dp_lanes = lane_num;
index a0f1bd711b533910ce00a2bbe642591419c4d198..e3f4c725a1c6910f3431431d9a2dbc538153f025 100644 (file)
@@ -2872,20 +2872,6 @@ static void intel_dp_info(struct seq_file *m,
                intel_panel_info(m, &intel_connector->panel);
 }
 
-static void intel_dp_mst_info(struct seq_file *m,
-                         struct intel_connector *intel_connector)
-{
-       struct intel_encoder *intel_encoder = intel_connector->encoder;
-       struct intel_dp_mst_encoder *intel_mst =
-               enc_to_mst(&intel_encoder->base);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                       intel_connector->port);
-
-       seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
 static void intel_hdmi_info(struct seq_file *m,
                            struct intel_connector *intel_connector)
 {
@@ -2929,8 +2915,6 @@ static void intel_connector_info(struct seq_file *m,
                        intel_hdmi_info(m, intel_connector);
                else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
                        intel_lvds_info(m, intel_connector);
-               else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
-                       intel_dp_mst_info(m, intel_connector);
        }
 
        seq_printf(m, "\tmodes:\n");
index fffdac801d3b0da03abdd65b2c90e50fb1913e54..363bd79dea2ef476bdf0f14886248175310720f6 100644 (file)
@@ -7444,6 +7444,8 @@ enum skl_disp_power_wells {
 #define  TRANS_CLK_SEL_DISABLED                (0x0<<29)
 #define  TRANS_CLK_SEL_PORT(x)         (((x)+1)<<29)
 
+#define CDCLK_FREQ                     _MMIO(0x46200)
+
 #define _TRANSA_MSA_MISC               0x60410
 #define _TRANSB_MSA_MISC               0x61410
 #define _TRANSC_MSA_MISC               0x62410
index 30f921421b0c944217832ba86856a6904f8fef11..7d281b40064a47f7e46071ef897c4720da38f9d8 100644 (file)
@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
        tmp |= AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_UPPER_N_MASK;
        tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
        tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
        tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        else
                tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
        /* ELD Conn_Type */
        connector->eld[5] &= ~(3 << 2);
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
                connector->eld[5] |= (1 << 2);
 
        connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
index 505fc5cf26f845217b5bd58c8fe0a865bde5170d..0364292367b1425a297cd35e6ae81da2cf18dbc3 100644 (file)
@@ -257,8 +257,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
                pipe_config->has_pch_encoder = true;
 
        /* LPT FDI RX only supports 8bpc. */
-       if (HAS_PCH_LPT(dev))
+       if (HAS_PCH_LPT(dev)) {
+               if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+                       DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+                       return false;
+               }
+
                pipe_config->pipe_bpp = 24;
+       }
 
        /* FDI must always be 2.7 GHz */
        if (HAS_DDI(dev)) {
index 3b57bf06abe8598c1c3b6fba0dc8e19b7f192619..96ffcc541e17697b69bad69fb24fe5bec3866aab 100644 (file)
@@ -3106,23 +3106,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
        I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 }
 
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-                                struct intel_crtc *intel_crtc)
-{
-       u32 temp;
-
-       if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
-               temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-
-               intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
-               if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
-                       return true;
-       }
-
-       return false;
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
@@ -3183,8 +3166,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                break;
        }
 
-       pipe_config->has_audio =
-               intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
+       if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+               temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+               if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+                       pipe_config->has_audio = true;
+       }
 
        if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
index 182f84937345d5b3d778d9521ca17f4b04608037..0104a06d01fd6617f54bedf68353935668eee45b 100644 (file)
@@ -7988,9 +7988,6 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 
        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
-       if (INTEL_INFO(dev)->gen < 5)
-               pipe_config->gmch_pfit.lvds_border_bits =
-                       I915_READ(LVDS) & LVDS_BORDER_ENABLE;
 }
 
 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9752,6 +9749,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
        sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
+       I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
        intel_update_cdclk(dev);
 
        WARN(cdclk != dev_priv->cdclk_freq,
index 937e77228466eb22e66ac765d001078a418badfd..2c999725b3d4b3ac10d9599227e00a1b392ae4f2 100644 (file)
@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
                return false;
        }
 
-       if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
-               pipe_config->has_audio = true;
        mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
        pipe_config->pbn = mst_pbn;
@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = encoder->base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
        int ret;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
        if (ret) {
                DRM_ERROR("failed to update payload %d\n", ret);
        }
-       if (intel_crtc->config->has_audio) {
-               intel_audio_codec_disable(encoder);
-               intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-       }
 }
 
 static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
@@ -221,7 +210,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port = intel_dig_port->port;
        int ret;
 
@@ -234,13 +222,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
        ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
 
        ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
-       if (crtc->config->has_audio) {
-               DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-                                pipe_name(crtc->pipe));
-               intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-               intel_audio_codec_enable(encoder);
-       }
 }
 
 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -266,9 +247,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 
        pipe_config->has_dp_encoder = true;
 
-       pipe_config->has_audio =
-               intel_ddi_is_audio_enabled(dev_priv, crtc);
-
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (temp & TRANS_DDI_PHSYNC)
                flags |= DRM_MODE_FLAG_PHSYNC;
index 7d3af3a72abea7ac557f5f107aaf597bc38d5165..9d0770c23fdece738575cc70a372353b7ea6284e 100644 (file)
@@ -1019,8 +1019,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-                                struct intel_crtc *intel_crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config);
 struct intel_encoder *
index cd9fe609aefbc2487ce94ab01e0d495dd12d8167..10dc3517b63b32921437bcff3d2352829061c4ed 100644 (file)
@@ -123,6 +123,10 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
        pipe_config->base.adjusted_mode.flags |= flags;
 
+       if (INTEL_INFO(dev)->gen < 5)
+               pipe_config->gmch_pfit.lvds_border_bits =
+                       tmp & LVDS_BORDER_ENABLE;
+
        /* gen2/3 store dither state in pfit control, needs to match */
        if (INTEL_INFO(dev)->gen < 4) {
                tmp = I915_READ(PFIT_CONTROL);
index 8ed3cf34f82d31bbefa98847d1413f625e2b2f74..3425d8e737b344ec8d43bdeab38957e2667d00bf 100644 (file)
@@ -6646,6 +6646,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+       /*
+        * Wait at least 100 clocks before re-enabling clock gating. See
+        * the definition of L3SQCREG1 in BSpec.
+        */
+       POSTING_READ(GEN8_L3SQCREG1);
+       udelay(1);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
        /*
index b80b08f71cb46e8d69d7bd94f6d951008267500e..532127c55de64197698336c92d602b0ff3043071 100644 (file)
@@ -1742,6 +1742,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_crtc *test_crtc;
        struct radeon_crtc *test_radeon_crtc;
 
@@ -1751,6 +1752,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
                test_radeon_crtc = to_radeon_crtc(test_crtc);
                if (test_radeon_crtc->encoder &&
                    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+                       /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+                       if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+                           test_radeon_crtc->pll_id == ATOM_PPLL2)
+                               continue;
                        /* for DP use the same PLL for all */
                        if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
                                return test_radeon_crtc->pll_id;
@@ -1772,6 +1777,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_crtc *test_crtc;
        struct radeon_crtc *test_radeon_crtc;
        u32 adjusted_clock, test_adjusted_clock;
@@ -1787,6 +1793,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
                test_radeon_crtc = to_radeon_crtc(test_crtc);
                if (test_radeon_crtc->encoder &&
                    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+                       /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+                       if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+                           test_radeon_crtc->pll_id == ATOM_PPLL2)
+                               continue;
                        /* check if we are already driving this connector with another crtc */
                        if (test_radeon_crtc->connector == radeon_crtc->connector) {
                                /* if we are, return that pll */
index afa9db1dc0e3dfcc14f51bf3e74b2f2d3c5702c5..cead089a9e7d2ea1bc78046aeed4d6cb7e8a02d6 100644 (file)
@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
                        }
                }
        } else {
-               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-                       for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
                                max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
                                if (max_pix_clock >= pix_clock) {
                                        *dp_lanes = lane_num;
index 3b0c229d7dcd23ffb7184ad79e2ebaa8f001cca9..db64e0062689b076842b9710c33e4660c96e9985 100644 (file)
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 
        tmp &= AUX_HPD_SEL(0x7);
        tmp |= AUX_HPD_SEL(chan->rec.hpd);
-       tmp |= AUX_EN | AUX_LS_READ_EN;
+       tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
 
        WREG32(AUX_CONTROL + aux_offset[instance], tmp);
 
index a806ba3818f7267dd2036dd870c4f60ca28dc0d5..8d6326d7e7beaf1875bb95af385dff1285f10b0e 100644 (file)
@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
        struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        const struct max8997_platform_data *pdata =
                                        dev_get_platdata(iodev->dev);
-       const struct max8997_haptic_platform_data *haptic_pdata =
-                                       pdata->haptic_pdata;
+       const struct max8997_haptic_platform_data *haptic_pdata = NULL;
        struct max8997_haptic *chip;
        struct input_dev *input_dev;
        int error;
 
+       if (pdata)
+               haptic_pdata = pdata->haptic_pdata;
+
        if (!haptic_pdata) {
                dev_err(&pdev->dev, "no haptic platform data\n");
                return -EINVAL;
index df3581f606282a2890b8ce78cf13d9e4882c3c0a..42de34b9299633f5104f8127e3c8bb50ec83f294 100644 (file)
@@ -257,6 +257,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        int vddvibr_uV = 0;
        int error;
 
+       of_node_get(twl6040_core_dev->of_node);
        twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
                                                 "vibra");
        if (!twl6040_core_node) {
index fdc243ca93ed7c50c89c18de6477e6b3c6d44a6f..e583f8b504549c6415f697f5d746a38e440dc556 100644 (file)
@@ -2,6 +2,10 @@
  * BYD TouchPad PS/2 mouse driver
  *
  * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ * Copyright (C) 2015 Richard Pospesel
+ * Copyright (C) 2015 Tai Chi Minh Ralph Eastwood
+ * Copyright (C) 2015 Martin Wimpress
+ * Copyright (C) 2015 Jay Kuri
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
index 7f366f1b0377a3557201a1bf64470472a29d5658..0b1b8c7b6ce51e69cd2c7f9e8c3eec7f6b6744eb 100644 (file)
@@ -74,11 +74,6 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
        return 0;
 }
 
-static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
-{
-       return __verify_planes_array(vb, pb);
-}
-
 /**
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -442,7 +437,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 }
 
 static const struct vb2_buf_ops v4l2_buf_ops = {
-       .verify_planes_array    = __verify_planes_array_core,
        .fill_user_buffer       = __fill_v4l2_buffer,
        .fill_vb2_buffer        = __fill_vb2_buffer,
        .copy_timestamp         = __copy_timestamp,
index a3f0e7ec406766733df313f0bc1f7c73d24461e3..ba9dfc9421ef806581f8df8a0afc418c857e3201 100644 (file)
@@ -1373,6 +1373,7 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int stp_state;
+       int err;
 
        if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
                return;
@@ -1394,12 +1395,13 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
                break;
        }
 
-       /* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
-        * so we can not update the port state directly but need to schedule it.
-        */
-       ps->ports[port].state = stp_state;
-       set_bit(port, ps->port_state_update_mask);
-       schedule_work(&ps->bridge_work);
+       mutex_lock(&ps->smi_mutex);
+       err = _mv88e6xxx_port_state(ps, port, stp_state);
+       mutex_unlock(&ps->smi_mutex);
+
+       if (err)
+               netdev_err(ds->ports[port], "failed to update state to %s\n",
+                          mv88e6xxx_port_state_names[stp_state]);
 }
 
 static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
@@ -2535,27 +2537,6 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
        mutex_unlock(&ps->smi_mutex);
 }
 
-static void mv88e6xxx_bridge_work(struct work_struct *work)
-{
-       struct mv88e6xxx_priv_state *ps;
-       struct dsa_switch *ds;
-       int port;
-
-       ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
-       ds = ps->ds;
-
-       mutex_lock(&ps->smi_mutex);
-
-       for (port = 0; port < ps->info->num_ports; ++port)
-               if (test_and_clear_bit(port, ps->port_state_update_mask) &&
-                   _mv88e6xxx_port_state(ps, port, ps->ports[port].state))
-                       netdev_warn(ds->ports[port],
-                                   "failed to update state to %s\n",
-                                   mv88e6xxx_port_state_names[ps->ports[port].state]);
-
-       mutex_unlock(&ps->smi_mutex);
-}
-
 static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
                                     int port, int page, int reg, int val)
 {
@@ -3145,8 +3126,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
 
        ps->ds = ds;
 
-       INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
-
        if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
                mutex_init(&ps->eeprom_mutex);
 
index 40e8721ecfb1d0832697bf653f90d2d84792f011..36d0e1504de1d35e030e1a306b7a90d2a284b315 100644 (file)
@@ -543,7 +543,6 @@ struct mv88e6xxx_vtu_stu_entry {
 
 struct mv88e6xxx_priv_port {
        struct net_device *bridge_dev;
-       u8 state;
 };
 
 struct mv88e6xxx_priv_state {
@@ -593,10 +592,6 @@ struct mv88e6xxx_priv_state {
 
        struct mv88e6xxx_priv_port      ports[DSA_MAX_PORTS];
 
-       DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS);
-
-       struct work_struct bridge_work;
-
        /* A switch may have a GPIO line tied to its reset pin. Parse
         * this from the device tree, and use it before performing
         * switch soft reset.
index 64792880e9407b38944113a6bcbe42b67fac8405..472c0fb3f4c4c05dd4eb3f9df2d108ebff6999aa 100644 (file)
@@ -43,6 +43,7 @@ static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
 static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
                                  struct xgene_cle_dbptr *dbptr, u32 *buf)
 {
+       buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
        buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
                 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
 
@@ -412,7 +413,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                        .branch = {
                                {
                                        /* IPV4 */
-                                       .valid = 0,
+                                       .valid = 1,
                                        .next_packet_pointer = 22,
                                        .jump_bw = JMP_FW,
                                        .jump_rel = JMP_ABS,
@@ -420,7 +421,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                        .next_node = PKT_PROT_NODE,
                                        .next_branch = 0,
                                        .data = 0x8,
-                                       .mask = 0xffff
+                                       .mask = 0x0
                                },
                                {
                                        .valid = 0,
@@ -456,7 +457,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                        .next_node = RSS_IPV4_TCP_NODE,
                                        .next_branch = 0,
                                        .data = 0x0600,
-                                       .mask = 0xffff
+                                       .mask = 0x00ff
                                },
                                {
                                        /* UDP */
@@ -468,7 +469,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                        .next_node = RSS_IPV4_UDP_NODE,
                                        .next_branch = 0,
                                        .data = 0x1100,
-                                       .mask = 0xffff
+                                       .mask = 0x00ff
                                },
                                {
                                        .valid = 0,
@@ -642,7 +643,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                {
                                        /* TCP DST Port */
                                        .valid = 0,
-                                       .next_packet_pointer = 256,
+                                       .next_packet_pointer = 258,
                                        .jump_bw = JMP_FW,
                                        .jump_rel = JMP_ABS,
                                        .operation = EQT,
index 13e829ab9053a5d996596efcdfe3c1bb2f264820..33c5f6b258245cd0ceb963640f68212c0528b4d4 100644 (file)
@@ -83,6 +83,8 @@
 #define CLE_TYPE_POS           0
 #define CLE_TYPE_LEN           2
 
+#define CLE_DROP_POS           28
+#define CLE_DROP_LEN           1
 #define CLE_DSTQIDL_POS                25
 #define CLE_DSTQIDL_LEN                7
 #define CLE_DSTQIDH_POS                0
index 457f745002424fc96817721b00b2261ff5bfb926..2f5638f7f864bc0a426fe4f94a9f28087ec5780d 100644 (file)
@@ -219,27 +219,30 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
                            struct xgene_enet_pdata *pdata,
                            enum xgene_enet_err_code status)
 {
-       struct rtnl_link_stats64 *stats = &pdata->stats;
-
        switch (status) {
        case INGRESS_CRC:
-               stats->rx_crc_errors++;
+               ring->rx_crc_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_CHECKSUM:
        case INGRESS_CHECKSUM_COMPUTE:
-               stats->rx_errors++;
+               ring->rx_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_TRUNC_FRAME:
-               stats->rx_frame_errors++;
+               ring->rx_frame_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_PKT_LEN:
-               stats->rx_length_errors++;
+               ring->rx_length_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_PKT_UNDER:
-               stats->rx_frame_errors++;
+               ring->rx_frame_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_FIFO_OVERRUN:
-               stats->rx_fifo_errors++;
+               ring->rx_fifo_errors++;
                break;
        default:
                break;
index ba7da98af2efb48c2a5811244ea4776185ce2534..45220be3122f99ecf50c96e5f83fb6c8399108ac 100644 (file)
@@ -86,7 +86,7 @@ enum xgene_enet_rm {
 #define RINGADDRL_POS          5
 #define RINGADDRL_LEN          27
 #define RINGADDRH_POS          0
-#define RINGADDRH_LEN          6
+#define RINGADDRH_LEN          7
 #define RINGSIZE_POS           23
 #define RINGSIZE_LEN           3
 #define RINGTYPE_POS           19
@@ -94,9 +94,9 @@ enum xgene_enet_rm {
 #define RINGMODE_POS           20
 #define RINGMODE_LEN           3
 #define RECOMTIMEOUTL_POS      28
-#define RECOMTIMEOUTL_LEN      3
+#define RECOMTIMEOUTL_LEN      4
 #define RECOMTIMEOUTH_POS      0
-#define RECOMTIMEOUTH_LEN      2
+#define RECOMTIMEOUTH_LEN      3
 #define NUMMSGSINQ_POS         1
 #define NUMMSGSINQ_LEN         16
 #define ACCEPTLERR             BIT(19)
@@ -201,6 +201,8 @@ enum xgene_enet_rm {
 #define USERINFO_LEN                   32
 #define FPQNUM_POS                     32
 #define FPQNUM_LEN                     12
+#define ELERR_POS                       46
+#define ELERR_LEN                       2
 #define NV_POS                         50
 #define NV_LEN                         1
 #define LL_POS                         51
index aa87049c353d264adcd3839b7af1639639222c5d..d208b172f4d7b662ecaeb8710cabf0f3b4d7228d 100644 (file)
@@ -443,8 +443,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
-       pdata->stats.tx_packets++;
-       pdata->stats.tx_bytes += skb->len;
+       tx_ring->tx_packets++;
+       tx_ring->tx_bytes += skb->len;
 
        pdata->ring_ops->wr_cmd(tx_ring, count);
        return NETDEV_TX_OK;
@@ -483,12 +483,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
        skb = buf_pool->rx_skb[skb_index];
 
        /* checking for error */
-       status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+       status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+                 GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                dev_kfree_skb_any(skb);
                xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                       status);
-               pdata->stats.rx_dropped++;
                ret = -EIO;
                goto out;
        }
@@ -506,8 +506,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                xgene_enet_skip_csum(skb);
        }
 
-       pdata->stats.rx_packets++;
-       pdata->stats.rx_bytes += datalen;
+       rx_ring->rx_packets++;
+       rx_ring->rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);
 out:
        if (--rx_ring->nbufpool == 0) {
@@ -630,7 +630,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
                ring = pdata->rx_ring[i];
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-                                      IRQF_SHARED, ring->irq_name, ring);
+                                      0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
@@ -641,7 +641,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
                ring = pdata->tx_ring[i]->cp_ring;
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-                                      IRQF_SHARED, ring->irq_name, ring);
+                                      0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
@@ -1127,12 +1127,31 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct rtnl_link_stats64 *stats = &pdata->stats;
+       struct xgene_enet_desc_ring *ring;
+       int i;
 
-       stats->rx_errors += stats->rx_length_errors +
-                           stats->rx_crc_errors +
-                           stats->rx_frame_errors +
-                           stats->rx_fifo_errors;
-       memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
+       memset(stats, 0, sizeof(struct rtnl_link_stats64));
+       for (i = 0; i < pdata->txq_cnt; i++) {
+               ring = pdata->tx_ring[i];
+               if (ring) {
+                       stats->tx_packets += ring->tx_packets;
+                       stats->tx_bytes += ring->tx_bytes;
+               }
+       }
+
+       for (i = 0; i < pdata->rxq_cnt; i++) {
+               ring = pdata->rx_ring[i];
+               if (ring) {
+                       stats->rx_packets += ring->rx_packets;
+                       stats->rx_bytes += ring->rx_bytes;
+                       stats->rx_errors += ring->rx_length_errors +
+                               ring->rx_crc_errors +
+                               ring->rx_frame_errors +
+                               ring->rx_fifo_errors;
+                       stats->rx_dropped += ring->rx_dropped;
+               }
+       }
+       memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
 
        return storage;
 }
@@ -1247,6 +1266,13 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
        for (i = 0; i < max_irqs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
+                       if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+                               max_irqs = i;
+                               pdata->rxq_cnt = max_irqs / 2;
+                               pdata->txq_cnt = max_irqs / 2;
+                               pdata->cq_cnt = max_irqs / 2;
+                               break;
+                       }
                        dev_err(dev, "Unable to get ENET IRQ\n");
                        ret = ret ? : -ENXIO;
                        return ret;
@@ -1450,19 +1476,28 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
                pdata->port_ops = &xgene_xgport_ops;
                pdata->cle_ops = &xgene_cle3in_ops;
                pdata->rm = RM0;
-               pdata->rxq_cnt = XGENE_NUM_RX_RING;
-               pdata->txq_cnt = XGENE_NUM_TX_RING;
-               pdata->cq_cnt = XGENE_NUM_TXC_RING;
+               if (!pdata->rxq_cnt) {
+                       pdata->rxq_cnt = XGENE_NUM_RX_RING;
+                       pdata->txq_cnt = XGENE_NUM_TX_RING;
+                       pdata->cq_cnt = XGENE_NUM_TXC_RING;
+               }
                break;
        }
 
        if (pdata->enet_id == XGENE_ENET1) {
                switch (pdata->port_id) {
                case 0:
-                       pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-                       pdata->eth_bufnum = START_ETH_BUFNUM_0;
-                       pdata->bp_bufnum = START_BP_BUFNUM_0;
-                       pdata->ring_num = START_RING_NUM_0;
+                       if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+                               pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+                               pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+                               pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+                               pdata->ring_num = START_RING_NUM_0;
+                       } else {
+                               pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+                               pdata->eth_bufnum = START_ETH_BUFNUM_0;
+                               pdata->bp_bufnum = START_BP_BUFNUM_0;
+                               pdata->ring_num = START_RING_NUM_0;
+                       }
                        break;
                case 1:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
index 0a2887b96a4256d389729908e47a142d07218936..092fbeccaa2054841f83923becd05352aefa4232 100644 (file)
 #define XGENE_ENET_MSS 1448
 #define XGENE_MIN_ENET_FRAME_SIZE      60
 
-#define XGENE_MAX_ENET_IRQ     8
-#define XGENE_NUM_RX_RING      4
-#define XGENE_NUM_TX_RING      4
-#define XGENE_NUM_TXC_RING     4
+#define XGENE_MAX_ENET_IRQ     16
+#define XGENE_NUM_RX_RING      8
+#define XGENE_NUM_TX_RING      8
+#define XGENE_NUM_TXC_RING     8
 
 #define START_CPU_BUFNUM_0     0
 #define START_ETH_BUFNUM_0     2
@@ -121,6 +121,16 @@ struct xgene_enet_desc_ring {
                struct xgene_enet_raw_desc16 *raw_desc16;
        };
        __le64 *exp_bufs;
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 rx_dropped;
+       u64 rx_errors;
+       u64 rx_length_errors;
+       u64 rx_crc_errors;
+       u64 rx_frame_errors;
+       u64 rx_fifo_errors;
 };
 
 struct xgene_mac_ops {
index 29a71b4dcc44f361ef61c90bee1575984a5f47ea..002df5a6756e06e2e9f6100dba0a13d9584227d6 100644 (file)
@@ -33,7 +33,7 @@
 #define LINK_STATUS                    BIT(2)
 #define LINK_UP                                BIT(15)
 #define MPA_IDLE_WITH_QMI_EMPTY                BIT(12)
-#define SG_RX_DV_GATE_REG_0_ADDR       0x0dfc
+#define SG_RX_DV_GATE_REG_0_ADDR       0x05fc
 
 extern const struct xgene_mac_ops xgene_sgmac_ops;
 extern const struct xgene_port_ops xgene_sgport_ops;
index 6a5a71710fa9821c00f3697693672f958b262906..5a0dca3e6ef6a23f08421d9667c5f7d83efe46c8 100644 (file)
@@ -78,6 +78,7 @@ enum board_idx {
        BCM57402,
        BCM57404,
        BCM57406,
+       BCM57314,
        BCM57304_VF,
        BCM57404_VF,
 };
@@ -92,6 +93,7 @@ static const struct {
        { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+       { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
        { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
 };
@@ -103,6 +105,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+       { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
 #ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
@@ -820,6 +823,46 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
        return skb;
 }
 
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+                          u32 *raw_cons, void *cmp)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct rx_cmp *rxcmp = cmp;
+       u32 tmp_raw_cons = *raw_cons;
+       u8 cmp_type, agg_bufs = 0;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+
+       if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+               agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+                           RX_CMP_AGG_BUFS) >>
+                          RX_CMP_AGG_BUFS_SHIFT;
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               struct rx_tpa_end_cmp *tpa_end = cmp;
+
+               agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                           RX_TPA_END_CMP_AGG_BUFS) >>
+                          RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+       }
+
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+                       return -EBUSY;
+       }
+       *raw_cons = tmp_raw_cons;
+       return 0;
+}
+
+static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+       if (!rxr->bnapi->in_reset) {
+               rxr->bnapi->in_reset = true;
+               set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+       rxr->rx_next_cons = 0xffff;
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
@@ -837,6 +880,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];
 
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               bnxt_sched_reset(bp, rxr);
+               return;
+       }
+
        prod_rx_buf->data = tpa_info->data;
 
        mapping = tpa_info->mapping;
@@ -874,6 +922,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 
        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
+       rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];
 
        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -987,6 +1036,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        dma_addr_t mapping;
        struct sk_buff *skb;
 
+       if (unlikely(bnapi->in_reset)) {
+               int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+
+               if (rc < 0)
+                       return ERR_PTR(-EBUSY);
+               return NULL;
+       }
+
        tpa_info = &rxr->rx_tpa[agg_id];
        data = tpa_info->data;
        prefetch(data);
@@ -1153,6 +1210,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
        cons = rxcmp->rx_cmp_opaque;
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
+               bnxt_sched_reset(bp, rxr);
+               return rc1;
+       }
        prefetch(data);
 
        agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@@ -1252,6 +1315,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 
 next_rx:
        rxr->rx_prod = NEXT_RX(prod);
+       rxr->rx_next_cons = NEXT_RX(cons);
 
 next_rx_no_prod:
        *raw_cons = tmp_raw_cons;
@@ -1263,15 +1327,6 @@ next_rx_no_prod:
        ((data) &                               \
         HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
 
-#define BNXT_EVENT_POLICY_MASK \
-       HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK
-
-#define BNXT_EVENT_POLICY_SFT  \
-       HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT
-
-#define BNXT_GET_EVENT_POLICY(data)    \
-       (((data) & BNXT_EVENT_POLICY_MASK) >> BNXT_EVENT_POLICY_SFT)
-
 static int bnxt_async_event_process(struct bnxt *bp,
                                    struct hwrm_async_event_cmpl *cmpl)
 {
@@ -1310,9 +1365,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
                if (bp->pf.port_id != port_id)
                        break;
 
-               bp->link_info.last_port_module_event =
-                       BNXT_GET_EVENT_POLICY(data1);
-
                set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
                break;
        }
@@ -1442,7 +1494,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                /* The valid test of the entry must be done first before
                 * reading any further.
                 */
-               rmb();
+               dma_rmb();
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
@@ -2537,6 +2589,7 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
                        rxr->rx_prod = 0;
                        rxr->rx_agg_prod = 0;
                        rxr->rx_sw_agg_prod = 0;
+                       rxr->rx_next_cons = 0;
                }
        }
 }
@@ -2718,7 +2771,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                                 int timeout, bool silent)
 {
-       int i, intr_process, rc;
+       int i, intr_process, rc, tmo_count;
        struct input *req = msg;
        u32 *data = msg;
        __le32 *resp_len, *valid;
@@ -2747,11 +2800,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                timeout = DFLT_HWRM_CMD_TIMEOUT;
 
        i = 0;
+       tmo_count = timeout * 40;
        if (intr_process) {
                /* Wait until hwrm response cmpl interrupt is processed */
                while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
-                      i++ < timeout) {
-                       usleep_range(600, 800);
+                      i++ < tmo_count) {
+                       usleep_range(25, 40);
                }
 
                if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -2762,30 +2816,30 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        } else {
                /* Check if response len is updated */
                resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
-               for (i = 0; i < timeout; i++) {
+               for (i = 0; i < tmo_count; i++) {
                        len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
                              HWRM_RESP_LEN_SFT;
                        if (len)
                                break;
-                       usleep_range(600, 800);
+                       usleep_range(25, 40);
                }
 
-               if (i >= timeout) {
+               if (i >= tmo_count) {
                        netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
                                   timeout, le16_to_cpu(req->req_type),
-                                  le16_to_cpu(req->seq_id), *resp_len);
+                                  le16_to_cpu(req->seq_id), len);
                        return -1;
                }
 
                /* Last word of resp contains valid bit */
                valid = bp->hwrm_cmd_resp_addr + len - 4;
-               for (i = 0; i < timeout; i++) {
+               for (i = 0; i < 5; i++) {
                        if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
                                break;
-                       usleep_range(600, 800);
+                       udelay(1);
                }
 
-               if (i >= timeout) {
+               if (i >= 5) {
                        netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
                                   timeout, le16_to_cpu(req->req_type),
                                   le16_to_cpu(req->seq_id), len, *valid);
@@ -4520,6 +4574,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
        int i;
 
        for (i = 0; i < bp->cp_nr_rings; i++) {
+               bp->bnapi[i]->in_reset = false;
                bnxt_enable_poll(bp->bnapi[i]);
                napi_enable(&bp->bnapi[i]->napi);
        }
@@ -4671,6 +4726,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
        link_info->transceiver = resp->xcvr_pkg_type;
        link_info->phy_addr = resp->eee_config_phy_addr &
                              PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+       link_info->module_status = resp->module_status;
 
        if (bp->flags & BNXT_FLAG_EEE_CAP) {
                struct ethtool_eee *eee = &bp->eee;
@@ -4723,6 +4779,33 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
        return 0;
 }
 
+static void bnxt_get_port_module_status(struct bnxt *bp)
+{
+       struct bnxt_link_info *link_info = &bp->link_info;
+       struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
+       u8 module_status;
+
+       if (bnxt_update_link(bp, true))
+               return;
+
+       module_status = link_info->module_status;
+       switch (module_status) {
+       case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
+       case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+       case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
+               netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
+                           bp->pf.port_id);
+               if (bp->hwrm_spec_code >= 0x10201) {
+                       netdev_warn(bp->dev, "Module part number %s\n",
+                                   resp->phy_vendor_partnumber);
+               }
+               if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
+                       netdev_warn(bp->dev, "TX is disabled\n");
+               if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
+                       netdev_warn(bp->dev, "SFP+ module is shutdown\n");
+       }
+}
+
 static void
 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
 {
@@ -5015,7 +5098,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        /* Enable TX queues */
        bnxt_tx_enable(bp);
        mod_timer(&bp->timer, jiffies + bp->current_interval);
-       bnxt_update_link(bp, true);
+       /* Poll link status and check for SFP+ module status */
+       bnxt_get_port_module_status(bp);
 
        return 0;
 
@@ -5550,28 +5634,6 @@ bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-static void bnxt_port_module_event(struct bnxt *bp)
-{
-       struct bnxt_link_info *link_info = &bp->link_info;
-       struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
-
-       if (bnxt_update_link(bp, true))
-               return;
-
-       if (link_info->last_port_module_event != 0) {
-               netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
-                           bp->pf.port_id);
-               if (bp->hwrm_spec_code >= 0x10201) {
-                       netdev_warn(bp->dev, "Module part number %s\n",
-                                   resp->phy_vendor_partnumber);
-               }
-       }
-       if (link_info->last_port_module_event == 1)
-               netdev_warn(bp->dev, "TX is disabled\n");
-       if (link_info->last_port_module_event == 3)
-               netdev_warn(bp->dev, "Shutdown SFP+ module\n");
-}
-
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -5620,7 +5682,7 @@ static void bnxt_sp_task(struct work_struct *work)
        }
 
        if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
-               bnxt_port_module_event(bp);
+               bnxt_get_port_module_status(bp);
 
        if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
                bnxt_hwrm_port_qstats(bp);
@@ -6197,6 +6259,22 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
        return rc;
 }
 
+static void bnxt_parse_log_pcie_link(struct bnxt *bp)
+{
+       enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+       enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+
+       if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+           speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+               netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
+       else
+               netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
+                           speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+                           speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+                           speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+                           "Unknown", width);
+}
+
 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        static int version_printed;
@@ -6317,6 +6395,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                    board_info[ent->driver_data].name,
                    (long)pci_resource_start(pdev, 0), dev->dev_addr);
 
+       bnxt_parse_log_pcie_link(bp);
+
        return 0;
 
 init_err:
index 62896352b0df0c396aa1c5e4e2d08c6c76d92366..2824d65b2e35be47015ea6654e00dbff3520bf19 100644 (file)
@@ -425,10 +425,17 @@ struct rx_tpa_end_cmp_ext {
 
 #define MAX_TPA                64
 
+#if (BNXT_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES   1
+#define MAX_RX_AGG_PAGES       4
+#define MAX_TX_PAGES   1
+#define MAX_CP_PAGES   8
+#else
 #define MAX_RX_PAGES   8
 #define MAX_RX_AGG_PAGES       32
 #define MAX_TX_PAGES   8
 #define MAX_CP_PAGES   64
+#endif
 
 #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
 #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
@@ -584,6 +591,7 @@ struct bnxt_rx_ring_info {
        u16                     rx_prod;
        u16                     rx_agg_prod;
        u16                     rx_sw_agg_prod;
+       u16                     rx_next_cons;
        void __iomem            *rx_doorbell;
        void __iomem            *rx_agg_doorbell;
 
@@ -636,6 +644,7 @@ struct bnxt_napi {
 #ifdef CONFIG_NET_RX_BUSY_POLL
        atomic_t                poll_state;
 #endif
+       bool                    in_reset;
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -829,6 +838,7 @@ struct bnxt_link_info {
        u16                     lp_auto_link_speeds;
        u16                     force_link_speed;
        u32                     preemphasis;
+       u8                      module_status;
 
        /* copy of requested setting from ethtool cmd */
        u8                      autoneg;
@@ -840,7 +850,6 @@ struct bnxt_link_info {
        u32                     advertising;
        bool                    force_link_chng;
 
-       u8                      last_port_module_event;
        /* a copy of phy_qcfg output used to report link
         * info to VF
         */
@@ -1121,6 +1130,16 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
 
 #endif
 
+#define I2C_DEV_ADDR_A0                                0xa0
+#define I2C_DEV_ADDR_A2                                0xa2
+#define SFP_EEPROM_SFF_8472_COMP_ADDR          0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE          1
+#define SFF_MODULE_ID_SFP                      0x3
+#define SFF_MODULE_ID_QSFP                     0xc
+#define SFF_MODULE_ID_QSFP_PLUS                        0xd
+#define SFF_MODULE_ID_QSFP28                   0x11
+#define BNXT_MAX_PHY_I2C_RESP_SIZE             64
+
 void bnxt_set_ring_params(struct bnxt *);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
index d6e41f237f2ca412c53301e10fe50a9595bdcbb9..a38cb047b54083897fa6e8df5c098c7e1b98d7ac 100644 (file)
@@ -327,7 +327,11 @@ static void bnxt_get_channels(struct net_device *dev,
        bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
        channel->max_combined = max_rx_rings;
 
-       bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
+       if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+               max_rx_rings = 0;
+               max_tx_rings = 0;
+       }
+
        tcs = netdev_get_num_tc(dev);
        if (tcs > 1)
                max_tx_rings /= tcs;
@@ -1494,6 +1498,125 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
        return 0;
 }
 
+static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
+                                           u16 page_number, u16 start_addr,
+                                           u16 data_length, u8 *buf)
+{
+       struct hwrm_port_phy_i2c_read_input req = {0};
+       struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+       int rc, byte_offset = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+       req.i2c_slave_addr = i2c_addr;
+       req.page_number = cpu_to_le16(page_number);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       do {
+               u16 xfer_size;
+
+               xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+               data_length -= xfer_size;
+               req.page_offset = cpu_to_le16(start_addr + byte_offset);
+               req.data_length = xfer_size;
+               req.enables = cpu_to_le32(start_addr + byte_offset ?
+                                PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+               mutex_lock(&bp->hwrm_cmd_lock);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (!rc)
+                       memcpy(buf + byte_offset, output->data, xfer_size);
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               byte_offset += xfer_size;
+       } while (!rc && data_length > 0);
+
+       return rc;
+}
+
+static int bnxt_get_module_info(struct net_device *dev,
+                               struct ethtool_modinfo *modinfo)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_port_phy_i2c_read_input req = {0};
+       struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* No point in going further if phy status indicates
+        * module is not inserted or if it is powered down or
+        * if it is of type 10GBase-T
+        */
+       if (bp->link_info.module_status >
+               PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+               return -EOPNOTSUPP;
+
+       /* This feature is not supported in older firmware versions */
+       if (bp->hwrm_spec_code < 0x10202)
+               return -EOPNOTSUPP;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+       req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+       req.page_number = 0;
+       req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+       req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               u32 module_id = le32_to_cpu(output->data[0]);
+
+               switch (module_id) {
+               case SFF_MODULE_ID_SFP:
+                       modinfo->type = ETH_MODULE_SFF_8472;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+                       break;
+               case SFF_MODULE_ID_QSFP:
+               case SFF_MODULE_ID_QSFP_PLUS:
+                       modinfo->type = ETH_MODULE_SFF_8436;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+                       break;
+               case SFF_MODULE_ID_QSFP28:
+                       modinfo->type = ETH_MODULE_SFF_8636;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+                       break;
+               default:
+                       rc = -EOPNOTSUPP;
+                       break;
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_get_module_eeprom(struct net_device *dev,
+                                 struct ethtool_eeprom *eeprom,
+                                 u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u16  start = eeprom->offset, length = eeprom->len;
+       int rc;
+
+       memset(data, 0, eeprom->len);
+
+       /* Read A0 portion of the EEPROM */
+       if (start < ETH_MODULE_SFF_8436_LEN) {
+               if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+                       length = ETH_MODULE_SFF_8436_LEN - start;
+               rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+                                                     start, length, data);
+               if (rc)
+                       return rc;
+               start += length;
+               data += length;
+               length = eeprom->len - length;
+       }
+
+       /* Read A2 portion of the EEPROM */
+       if (length) {
+               start -= ETH_MODULE_SFF_8436_LEN;
+               bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+                                                length, data);
+       }
+       return rc;
+}
+
 const struct ethtool_ops bnxt_ethtool_ops = {
        .get_settings           = bnxt_get_settings,
        .set_settings           = bnxt_set_settings,
@@ -1524,4 +1647,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .get_link               = bnxt_get_link,
        .get_eee                = bnxt_get_eee,
        .set_eee                = bnxt_set_eee,
+       .get_module_info        = bnxt_get_module_info,
+       .get_module_eeprom      = bnxt_get_module_eeprom,
 };
index 80f95560086d7f07176928a80549c60d2af9d704..05e3c49a7677b5951e8a117b44ae83ec29bee9d9 100644 (file)
@@ -2093,6 +2093,40 @@ struct hwrm_port_phy_qcaps_output {
        #define PORT_PHY_QCAPS_RESP_VALID_SFT                       24
 };
 
+/* hwrm_port_phy_i2c_read */
+/* Input (40 bytes) */
+struct hwrm_port_phy_i2c_read_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 enables;
+       #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET           0x1UL
+       __le16 port_id;
+       u8 i2c_slave_addr;
+       u8 unused_0;
+       __le16 page_number;
+       __le16 page_offset;
+       u8 data_length;
+       u8 unused_1[7];
+};
+
+/* Output (80 bytes) */
+struct hwrm_port_phy_i2c_read_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 data[16];
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
 /* Input (24 bytes) */
 struct hwrm_queue_qportcfg_input {
        __le16 req_type;
index fa05e347262ff76bde5c64f32146ea0daea7beb2..06b819db51b18d9e3aaa8e1ada8ed8e929597143 100644 (file)
@@ -533,6 +533,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
                nicvf_config_vlan_stripping(nic, nic->netdev->features);
 
        /* Enable Receive queue */
+       memset(&rq_cfg, 0, sizeof(struct rq_cfg));
        rq_cfg.ena = 1;
        rq_cfg.tcp_ena = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@@ -565,6 +566,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                              qidx, (u64)(cq->dmem.phys_base));
 
        /* Enable Completion queue */
+       memset(&cq_cfg, 0, sizeof(struct cq_cfg));
        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        cq_cfg.caching = 0;
@@ -613,6 +615,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
                              qidx, (u64)(sq->dmem.phys_base));
 
        /* Enable send queue  & set queue size */
+       memset(&sq_cfg, 0, sizeof(struct sq_cfg));
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
@@ -649,6 +652,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
 
        /* Enable RBDR  & set queue size */
        /* Buffer size should be in multiples of 128 bytes */
+       memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
        rbdr_cfg.ena = 1;
        rbdr_cfg.reset = 0;
        rbdr_cfg.ldwb = 0;
index 1f23845a0694b5e64eaaa0f71ffca230d54feaf0..085f9125cf42a6c1aa76bbb6744ba5b9ba919d31 100644 (file)
@@ -145,7 +145,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
        u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
 
        /* Check if we got TX */
-       if (!priv->tx_packet_sent || tx_ctrl_ct)
+       if (!priv->tx_skb || tx_ctrl_ct)
                return;
 
        /* Ack Tx ctrl register */
@@ -160,7 +160,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
        }
 
        dev_kfree_skb(priv->tx_skb);
-       priv->tx_packet_sent = false;
+       priv->tx_skb = NULL;
 
        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
@@ -183,6 +183,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
        work_done = nps_enet_rx_handler(ndev);
        if (work_done < budget) {
                u32 buf_int_enable_value = 0;
+               u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+               u32 tx_ctrl_ct =
+                       (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
 
                napi_complete(napi);
 
@@ -192,6 +195,18 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
                nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
                                 buf_int_enable_value);
+
+               /* in case we will get a tx interrupt while interrupts
+                * are masked, we will lose it since the tx is edge interrupt.
+                * specifically, while executing the code section above,
+                * between nps_enet_tx_handler and the interrupts enable, all
+                * tx requests will be stuck until we will get an rx interrupt.
+                * the two code lines below will solve this situation by
+                * re-adding ourselves to the poll list.
+                */
+
+               if (priv->tx_skb && !tx_ctrl_ct)
+                       napi_reschedule(napi);
        }
 
        return work_done;
@@ -217,7 +232,7 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
        u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
        u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
 
-       if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr)
+       if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
                if (likely(napi_schedule_prep(&priv->napi))) {
                        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        __napi_schedule(&priv->napi);
@@ -387,8 +402,6 @@ static void nps_enet_send_frame(struct net_device *ndev,
        /* Write the length of the Frame */
        tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
 
-       /* Indicate SW is done */
-       priv->tx_packet_sent = true;
        tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
        /* Send Frame */
        nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
@@ -465,7 +478,7 @@ static s32 nps_enet_open(struct net_device *ndev)
        s32 err;
 
        /* Reset private variables */
-       priv->tx_packet_sent = false;
+       priv->tx_skb = NULL;
        priv->ge_mac_cfg_2_value = 0;
        priv->ge_mac_cfg_3_value = 0;
 
@@ -534,6 +547,11 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
 
        priv->tx_skb = skb;
 
+       /* make sure tx_skb is actually written to the memory
+        * before the HW is informed and the IRQ is fired.
+        */
+       wmb();
+
        nps_enet_send_frame(ndev, skb);
 
        return NETDEV_TX_OK;
index d0cab600bce8d94bbfde1fbd30c2fca76fe2cb87..3939ca20cc9fa0b01b108504fae363927ca92472 100644 (file)
  * struct nps_enet_priv - Storage of ENET's private information.
  * @regs_base:      Base address of ENET memory-mapped control registers.
  * @irq:            For RX/TX IRQ number.
- * @tx_packet_sent: SW indication if frame is being sent.
  * @tx_skb:         socket buffer of sent frame.
  * @napi:           Structure for NAPI.
  */
 struct nps_enet_priv {
        void __iomem *regs_base;
        s32 irq;
-       bool tx_packet_sent;
        struct sk_buff *tx_skb;
        struct napi_struct napi;
        u32 ge_mac_cfg_2_value;
index 2af603f3e4183d5209ec9cb762892b7cac185206..cd391376036c9c6e2045f8e9dbf1c3a2c6d921da 100644 (file)
@@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
        /* EEPROM access above 16k is unsupported */
        if (size > 14)
                size = 14;
-       nvm->word_size = 1 << size;
+       nvm->word_size = BIT(size);
 
        return 0;
 }
@@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
 
        /* Transmit Descriptor Control 0 */
        reg = er32(TXDCTL(0));
-       reg |= (1 << 22);
+       reg |= BIT(22);
        ew32(TXDCTL(0), reg);
 
        /* Transmit Descriptor Control 1 */
        reg = er32(TXDCTL(1));
-       reg |= (1 << 22);
+       reg |= BIT(22);
        ew32(TXDCTL(1), reg);
 
        /* Transmit Arbitration Control 0 */
        reg = er32(TARC(0));
        reg &= ~(0xF << 27);    /* 30:27 */
        if (hw->phy.media_type != e1000_media_type_copper)
-               reg &= ~(1 << 20);
+               reg &= ~BIT(20);
        ew32(TARC(0), reg);
 
        /* Transmit Arbitration Control 1 */
        reg = er32(TARC(1));
        if (er32(TCTL) & E1000_TCTL_MULR)
-               reg &= ~(1 << 28);
+               reg &= ~BIT(28);
        else
-               reg |= (1 << 28);
+               reg |= BIT(28);
        ew32(TARC(1), reg);
 
        /* Disable IPv6 extension header parsing because some malformed
index 5f7016442ec4faeb827c78a074dbf6bef3eb0fec..7fd4d54599e4557dd37b457396145195a77da3fa 100644 (file)
@@ -185,7 +185,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
                /* EEPROM access above 16k is unsupported */
                if (size > 14)
                        size = 14;
-               nvm->word_size = 1 << size;
+               nvm->word_size = BIT(size);
                break;
        }
 
@@ -1163,12 +1163,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
 
        /* Transmit Descriptor Control 0 */
        reg = er32(TXDCTL(0));
-       reg |= (1 << 22);
+       reg |= BIT(22);
        ew32(TXDCTL(0), reg);
 
        /* Transmit Descriptor Control 1 */
        reg = er32(TXDCTL(1));
-       reg |= (1 << 22);
+       reg |= BIT(22);
        ew32(TXDCTL(1), reg);
 
        /* Transmit Arbitration Control 0 */
@@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
-               reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+               reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26);
                break;
        case e1000_82574:
        case e1000_82583:
-               reg |= (1 << 26);
+               reg |= BIT(26);
                break;
        default:
                break;
@@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
-               reg &= ~((1 << 29) | (1 << 30));
-               reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+               reg &= ~(BIT(29) | BIT(30));
+               reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26);
                if (er32(TCTL) & E1000_TCTL_MULR)
-                       reg &= ~(1 << 28);
+                       reg &= ~BIT(28);
                else
-                       reg |= (1 << 28);
+                       reg |= BIT(28);
                ew32(TARC(1), reg);
                break;
        default:
@@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
        case e1000_82574:
        case e1000_82583:
                reg = er32(CTRL);
-               reg &= ~(1 << 29);
+               reg &= ~BIT(29);
                ew32(CTRL, reg);
                break;
        default:
@@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
        case e1000_82574:
        case e1000_82583:
                reg = er32(CTRL_EXT);
-               reg &= ~(1 << 23);
-               reg |= (1 << 22);
+               reg &= ~BIT(23);
+               reg |= BIT(22);
                ew32(CTRL_EXT, reg);
                break;
        default:
@@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
        case e1000_82574:
        case e1000_82583:
                reg = er32(GCR);
-               reg |= (1 << 22);
+               reg |= BIT(22);
                ew32(GCR, reg);
 
                /* Workaround for hardware errata.
@@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
                                       E1000_VFTA_ENTRY_SHIFT) &
                            E1000_VFTA_ENTRY_MASK;
                        vfta_bit_in_reg =
-                           1 << (hw->mng_cookie.vlan_id &
-                                 E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+                           BIT(hw->mng_cookie.vlan_id &
+                               E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
                }
                break;
        default:
index 52eb641fc9dce49b0db3855fcd81fdc8c14fae1b..ef96cd11d6d2c34a726db4b1c0c0a5ec8a76bf7c 100644 (file)
@@ -109,18 +109,18 @@ struct e1000_info;
 #define E1000_TXDCTL_DMA_BURST_ENABLE                          \
        (E1000_TXDCTL_GRAN | /* set descriptor granularity */  \
         E1000_TXDCTL_COUNT_DESC |                             \
-        (1 << 16) | /* wthresh must be +1 more than desired */\
-        (1 << 8)  | /* hthresh */                             \
-        0x1f)       /* pthresh */
+        (1u << 16) | /* wthresh must be +1 more than desired */\
+        (1u << 8)  | /* hthresh */                             \
+        0x1f)        /* pthresh */
 
 #define E1000_RXDCTL_DMA_BURST_ENABLE                          \
        (0x01000000 | /* set descriptor granularity */         \
-        (4 << 16)  | /* set writeback threshold    */         \
-        (4 << 8)   | /* set prefetch threshold     */         \
+        (4u << 16) | /* set writeback threshold    */         \
+        (4u << 8)  | /* set prefetch threshold     */         \
         0x20)        /* set hthresh                */
 
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
+#define E1000_TIDV_FPD BIT(31)
+#define E1000_RDTR_FPD BIT(31)
 
 enum e1000_boards {
        board_82571,
@@ -347,6 +347,7 @@ struct e1000_adapter {
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_clock_info;
        struct pm_qos_request pm_qos_req;
+       s32 ptp_delta;
 
        u16 eee_advert;
 };
@@ -404,53 +405,53 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define E1000_82574_SYSTIM_EPSILON     (1ULL << 35ULL)
 
 /* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT                      (1 << 0)
-#define FLAG_HAS_FLASH                    (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER           (1 << 2)
-#define FLAG_HAS_WOL                      (1 << 3)
-/* reserved bit4 */
-#define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
-#define FLAG_READ_ONLY_NVM                (1 << 8)
-#define FLAG_IS_ICH                       (1 << 9)
-#define FLAG_HAS_MSIX                     (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
-#define FLAG_IS_QUAD_PORT_A               (1 << 12)
-#define FLAG_IS_QUAD_PORT                 (1 << 13)
-#define FLAG_HAS_HW_TIMESTAMP             (1 << 14)
-#define FLAG_APME_IN_WUC                  (1 << 15)
-#define FLAG_APME_IN_CTRL3                (1 << 16)
-#define FLAG_APME_CHECK_PORT_B            (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME        (1 << 18)
-#define FLAG_NO_WAKE_UCAST                (1 << 19)
-#define FLAG_MNG_PT_ENABLED               (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA         (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT          (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO            (1 << 23)
-#define FLAG_RX_NEEDS_RESTART             (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP           (1 << 25)
-#define FLAG_SMART_POWER_DOWN             (1 << 26)
-#define FLAG_MSI_ENABLED                  (1 << 27)
-/* reserved (1 << 28) */
-#define FLAG_TSO_FORCE                    (1 << 29)
-#define FLAG_RESTART_NOW                  (1 << 30)
-#define FLAG_MSI_TEST_FAILED              (1 << 31)
-
-#define FLAG2_CRC_STRIPPING               (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
-#define FLAG2_IS_DISCARDING               (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1             (1 << 3)
-#define FLAG2_HAS_PHY_STATS               (1 << 4)
-#define FLAG2_HAS_EEE                     (1 << 5)
-#define FLAG2_DMA_BURST                   (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S            (1 << 7)
-#define FLAG2_DISABLE_AIM                 (1 << 8)
-#define FLAG2_CHECK_PHY_HANG              (1 << 9)
-#define FLAG2_NO_DISABLE_RX               (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA         (1 << 11)
-#define FLAG2_DFLT_CRC_STRIPPING          (1 << 12)
-#define FLAG2_CHECK_RX_HWTSTAMP           (1 << 13)
+#define FLAG_HAS_AMT                      BIT(0)
+#define FLAG_HAS_FLASH                    BIT(1)
+#define FLAG_HAS_HW_VLAN_FILTER           BIT(2)
+#define FLAG_HAS_WOL                      BIT(3)
+/* reserved BIT(4) */
+#define FLAG_HAS_CTRLEXT_ON_LOAD          BIT(5)
+#define FLAG_HAS_SWSM_ON_LOAD             BIT(6)
+#define FLAG_HAS_JUMBO_FRAMES             BIT(7)
+#define FLAG_READ_ONLY_NVM                BIT(8)
+#define FLAG_IS_ICH                       BIT(9)
+#define FLAG_HAS_MSIX                     BIT(10)
+#define FLAG_HAS_SMART_POWER_DOWN         BIT(11)
+#define FLAG_IS_QUAD_PORT_A               BIT(12)
+#define FLAG_IS_QUAD_PORT                 BIT(13)
+#define FLAG_HAS_HW_TIMESTAMP             BIT(14)
+#define FLAG_APME_IN_WUC                  BIT(15)
+#define FLAG_APME_IN_CTRL3                BIT(16)
+#define FLAG_APME_CHECK_PORT_B            BIT(17)
+#define FLAG_DISABLE_FC_PAUSE_TIME        BIT(18)
+#define FLAG_NO_WAKE_UCAST                BIT(19)
+#define FLAG_MNG_PT_ENABLED               BIT(20)
+#define FLAG_RESET_OVERWRITES_LAA         BIT(21)
+#define FLAG_TARC_SPEED_MODE_BIT          BIT(22)
+#define FLAG_TARC_SET_BIT_ZERO            BIT(23)
+#define FLAG_RX_NEEDS_RESTART             BIT(24)
+#define FLAG_LSC_GIG_SPEED_DROP           BIT(25)
+#define FLAG_SMART_POWER_DOWN             BIT(26)
+#define FLAG_MSI_ENABLED                  BIT(27)
+/* reserved BIT(28) */
+#define FLAG_TSO_FORCE                    BIT(29)
+#define FLAG_RESTART_NOW                  BIT(30)
+#define FLAG_MSI_TEST_FAILED              BIT(31)
+
+#define FLAG2_CRC_STRIPPING               BIT(0)
+#define FLAG2_HAS_PHY_WAKEUP              BIT(1)
+#define FLAG2_IS_DISCARDING               BIT(2)
+#define FLAG2_DISABLE_ASPM_L1             BIT(3)
+#define FLAG2_HAS_PHY_STATS               BIT(4)
+#define FLAG2_HAS_EEE                     BIT(5)
+#define FLAG2_DMA_BURST                   BIT(6)
+#define FLAG2_DISABLE_ASPM_L0S            BIT(7)
+#define FLAG2_DISABLE_AIM                 BIT(8)
+#define FLAG2_CHECK_PHY_HANG              BIT(9)
+#define FLAG2_NO_DISABLE_RX               BIT(10)
+#define FLAG2_PCIM2PCI_ARBITER_WA         BIT(11)
+#define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
+#define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
 
 #define E1000_RX_DESC_PS(R, i)     \
        (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
index 1e3973aa707cb894ec63ced66fd80809671c4d29..7aff68a4a4df527d26c50da69d0cad2dd91c2767 100644 (file)
@@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev,
        else
                ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
 
+       if (hw->phy.media_type != e1000_media_type_copper)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
        return 0;
 }
 
@@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
                mac->forced_speed_duplex = ADVERTISE_100_FULL;
                break;
        case SPEED_1000 + DUPLEX_FULL:
-               mac->autoneg = 1;
-               adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+               if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+                       mac->autoneg = 1;
+                       adapter->hw.phy.autoneg_advertised =
+                               ADVERTISE_1000_FULL;
+               } else {
+                       mac->forced_speed_duplex = ADVERTISE_1000_FULL;
+               }
                break;
        case SPEED_1000 + DUPLEX_HALF:  /* not supported */
        default:
@@ -439,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev,
 
        memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
-       regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
-           adapter->pdev->device;
+       regs->version = (1u << 24) |
+                       (adapter->pdev->revision << 16) |
+                       adapter->pdev->device;
 
        regs_buff[0] = er32(CTRL);
        regs_buff[1] = er32(STATUS);
@@ -895,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        case e1000_pch2lan:
        case e1000_pch_lpt:
        case e1000_pch_spt:
-               mask |= (1 << 18);
+               mask |= BIT(18);
                break;
        default:
                break;
@@ -914,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 
                        /* SHRAH[9] different than the others */
                        if (i == 10)
-                               mask |= (1 << 30);
+                               mask |= BIT(30);
                        else
-                               mask &= ~(1 << 30);
+                               mask &= ~BIT(30);
                }
                if (mac->type == e1000_pch2lan) {
                        /* SHRAH[0,1,2] different than previous */
@@ -924,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
                                mask &= 0xFFF4FFFF;
                        /* SHRAH[3] different than SHRAH[0,1,2] */
                        if (i == 4)
-                               mask |= (1 << 30);
+                               mask |= BIT(30);
                        /* RAR[1-6] owned by management engine - skipping */
                        if (i > 0)
                                i += 6;
@@ -1019,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
        /* Test each interrupt */
        for (i = 0; i < 10; i++) {
                /* Interrupt to test */
-               mask = 1 << i;
+               mask = BIT(i);
 
                if (adapter->flags & FLAG_IS_ICH) {
                        switch (mask) {
@@ -1387,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
        case e1000_phy_82579:
                /* Disable PHY energy detect power down */
                e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
-               e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+               e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
                /* Disable full chip energy detect */
                e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
                e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
@@ -1453,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
 
        /* disable autoneg */
        ctrl = er32(TXCW);
-       ctrl &= ~(1 << 31);
+       ctrl &= ~BIT(31);
        ew32(TXCW, ctrl);
 
        link = (er32(STATUS) & E1000_STATUS_LU);
@@ -2283,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
                                  SOF_TIMESTAMPING_RX_HARDWARE |
                                  SOF_TIMESTAMPING_RAW_HARDWARE);
 
-       info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
-       info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
-                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                           (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-                           (1 << HWTSTAMP_FILTER_ALL));
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
+                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                           BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+                           BIT(HWTSTAMP_FILTER_ALL));
 
        if (adapter->ptp_clock)
                info->phc_index = ptp_clock_index(adapter->ptp_clock);
index c0f4887ea44d8c2f157cb9c63555acb5217ff1c9..3e11322d8d586a839bb19b15b2dc37b680ad8bf5 100644 (file)
@@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 
                while (value > PCI_LTR_VALUE_MASK) {
                        scale++;
-                       value = DIV_ROUND_UP(value, (1 << 5));
+                       value = DIV_ROUND_UP(value, BIT(5));
                }
                if (scale > E1000_LTRV_SCALE_MAX) {
                        e_dbg("Invalid LTR latency scale %d\n", scale);
@@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
 
                if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
-                       phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+                       phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
 
                e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
                break;
@@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
                /* Restore SMBus frequency */
                if (freq--) {
                        phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
-                       phy_data |= (freq & (1 << 0)) <<
+                       phy_data |= (freq & BIT(0)) <<
                            HV_SMB_ADDR_FREQ_LOW_SHIFT;
-                       phy_data |= (freq & (1 << 1)) <<
+                       phy_data |= (freq & BIT(1)) <<
                            (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
                } else {
                        e_dbg("Unsupported SMB frequency in PHY\n");
@@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 
        /* disable Rx path while enabling/disabling workaround */
        e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
-       ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+       ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
        if (ret_val)
                return ret_val;
 
@@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 
                /* Enable jumbo frame workaround in the MAC */
                mac_reg = er32(FFLT_DBG);
-               mac_reg &= ~(1 << 14);
+               mac_reg &= ~BIT(14);
                mac_reg |= (7 << 15);
                ew32(FFLT_DBG, mac_reg);
 
@@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        return ret_val;
                ret_val = e1000e_write_kmrn_reg(hw,
                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
-                                               data | (1 << 0));
+                                               data | BIT(0));
                if (ret_val)
                        return ret_val;
                ret_val = e1000e_read_kmrn_reg(hw,
@@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                if (ret_val)
                        return ret_val;
                e1e_rphy(hw, PHY_REG(769, 16), &data);
-               data &= ~(1 << 13);
+               data &= ~BIT(13);
                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
                if (ret_val)
                        return ret_val;
@@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                if (ret_val)
                        return ret_val;
                e1e_rphy(hw, HV_PM_CTRL, &data);
-               ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+               ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
                if (ret_val)
                        return ret_val;
        } else {
@@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        return ret_val;
                ret_val = e1000e_write_kmrn_reg(hw,
                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
-                                               data & ~(1 << 0));
+                                               data & ~BIT(0));
                if (ret_val)
                        return ret_val;
                ret_val = e1000e_read_kmrn_reg(hw,
@@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                if (ret_val)
                        return ret_val;
                e1e_rphy(hw, PHY_REG(769, 16), &data);
-               data |= (1 << 13);
+               data |= BIT(13);
                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
                if (ret_val)
                        return ret_val;
@@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                if (ret_val)
                        return ret_val;
                e1e_rphy(hw, HV_PM_CTRL, &data);
-               ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+               ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
                if (ret_val)
                        return ret_val;
        }
 
        /* re-enable Rx path after enabling/disabling workaround */
-       return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+       return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
 }
 
 /**
@@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 
        /* Extended Device Control */
        reg = er32(CTRL_EXT);
-       reg |= (1 << 22);
+       reg |= BIT(22);
        /* Enable PHY low-power state when MAC is at D3 w/o WoL */
        if (hw->mac.type >= e1000_pchlan)
                reg |= E1000_CTRL_EXT_PHYPDEN;
@@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 
        /* Transmit Descriptor Control 0 */
        reg = er32(TXDCTL(0));
-       reg |= (1 << 22);
+       reg |= BIT(22);
        ew32(TXDCTL(0), reg);
 
        /* Transmit Descriptor Control 1 */
        reg = er32(TXDCTL(1));
-       reg |= (1 << 22);
+       reg |= BIT(22);
        ew32(TXDCTL(1), reg);
 
        /* Transmit Arbitration Control 0 */
        reg = er32(TARC(0));
        if (hw->mac.type == e1000_ich8lan)
-               reg |= (1 << 28) | (1 << 29);
-       reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+               reg |= BIT(28) | BIT(29);
+       reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
        ew32(TARC(0), reg);
 
        /* Transmit Arbitration Control 1 */
        reg = er32(TARC(1));
        if (er32(TCTL) & E1000_TCTL_MULR)
-               reg &= ~(1 << 28);
+               reg &= ~BIT(28);
        else
-               reg |= (1 << 28);
-       reg |= (1 << 24) | (1 << 26) | (1 << 30);
+               reg |= BIT(28);
+       reg |= BIT(24) | BIT(26) | BIT(30);
        ew32(TARC(1), reg);
 
        /* Device Status */
        if (hw->mac.type == e1000_ich8lan) {
                reg = er32(STATUS);
-               reg &= ~(1 << 31);
+               reg &= ~BIT(31);
                ew32(STATUS, reg);
        }
 
index 2311f6003f58cedcc0d726a31c05b69d0b8b7bcd..67163ca898ba2abca4e4e66eecdf76a5f5c3984c 100644 (file)
                                 (ID_LED_OFF1_ON2  <<  4) | \
                                 (ID_LED_DEF1_DEF2))
 
-#define E1000_ICH_NVM_SIG_WORD         0x13
-#define E1000_ICH_NVM_SIG_MASK         0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK   0xC0
-#define E1000_ICH_NVM_SIG_VALUE                0x80
+#define E1000_ICH_NVM_SIG_WORD         0x13u
+#define E1000_ICH_NVM_SIG_MASK         0xC000u
+#define E1000_ICH_NVM_VALID_SIG_MASK   0xC0u
+#define E1000_ICH_NVM_SIG_VALUE                0x80u
 
 #define E1000_ICH8_LAN_INIT_TIMEOUT    1500
 
index e59d7c283cd405daaaea4bb210305c207656bdf3..b322011ec2828a704fcac2acf9b949f8c955df48 100644 (file)
@@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                hash_bit = hash_value & 0x1F;
 
-               hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+               hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
                mc_addr_list += (ETH_ALEN);
        }
 
index 269087cb7b963ae81cb1bafec5ed98a11f6c83c4..75e60897b7e748bca73317ae0e6d9247e6a9bbf9 100644 (file)
@@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
                else
                        next_desc = "";
                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
-                       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
-                        ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+                       (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
+                        ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
                        i,
                        (unsigned long long)le64_to_cpu(u0->a),
                        (unsigned long long)le64_to_cpu(u0->b),
@@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
        adapter->eiac_mask |= E1000_IMS_OTHER;
 
        /* Cause Tx interrupts on every write back */
-       ivar |= (1 << 31);
+       ivar |= BIT(31);
 
        ew32(IVAR, ivar);
 
@@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
                index = (vid >> 5) & 0x7F;
                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-               vfta |= (1 << (vid & 0x1F));
+               vfta |= BIT((vid & 0x1F));
                hw->mac.ops.write_vfta(hw, index, vfta);
        }
 
@@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
                index = (vid >> 5) & 0x7F;
                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-               vfta &= ~(1 << (vid & 0x1F));
+               vfta &= ~BIT((vid & 0x1F));
                hw->mac.ops.write_vfta(hw, index, vfta);
        }
 
@@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
 
                        /* Enable this decision filter in MANC2H */
                        if (mdef)
-                               manc2h |= (1 << i);
+                               manc2h |= BIT(i);
 
                        j |= mdef;
                }
@@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
                        if (er32(MDEF(i)) == 0) {
                                ew32(MDEF(i), (E1000_MDEF_PORT_623 |
                                               E1000_MDEF_PORT_664));
-                               manc2h |= (1 << 1);
+                               manc2h |= BIT(1);
                                j++;
                                break;
                        }
@@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                /* set the speed mode bit, we'll clear it if we're not at
                 * gigabit link later
                 */
-#define SPEED_MODE_BIT (1 << 21)
+#define SPEED_MODE_BIT BIT(21)
                tarc |= SPEED_MODE_BIT;
                ew32(TARC(0), tarc);
        }
@@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 
                e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
                phy_data &= 0xfff8;
-               phy_data |= (1 << 2);
+               phy_data |= BIT(2);
                e1e_wphy(hw, PHY_REG(770, 26), phy_data);
 
                e1e_rphy(hw, 22, &phy_data);
                phy_data &= 0x0fff;
-               phy_data |= (1 << 14);
+               phy_data |= BIT(14);
                e1e_wphy(hw, 0x10, 0x2823);
                e1e_wphy(hw, 0x11, 0x0003);
                e1e_wphy(hw, 22, phy_data);
@@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
                 * combining
                 */
                netdev_for_each_uc_addr(ha, netdev) {
-                       int rval;
+                       int ret_val;
 
                        if (!rar_entries)
                                break;
-                       rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
-                       if (rval < 0)
+                       ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+                       if (ret_val < 0)
                                return -ENOMEM;
                        count++;
                }
@@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
            !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
                u32 fextnvm7 = er32(FEXTNVM7);
 
-               if (!(fextnvm7 & (1 << 0))) {
-                       ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+               if (!(fextnvm7 & BIT(0))) {
+                       ew32(FEXTNVM7, fextnvm7 | BIT(0));
                        e1e_flush();
                }
        }
@@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
        bool is_l4 = false;
        bool is_l2 = false;
        u32 regval;
-       s32 ret_val;
 
        if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
                return -EINVAL;
@@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
        er32(RXSTMPH);
        er32(TXSTMPH);
 
-       /* Get and set the System Time Register SYSTIM base frequency */
-       ret_val = e1000e_get_base_timinca(adapter, &regval);
-       if (ret_val)
-               return ret_val;
-       ew32(TIMINCA, regval);
-
-       /* reset the ns time counter */
-       timecounter_init(&adapter->tc, &adapter->cc,
-                        ktime_to_ns(ktime_get_real()));
-
        return 0;
 }
 
@@ -3839,7 +3828,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
        /* update thresholds: prefetch threshold to 31, host threshold to 1
         * and make sure the granularity is "descriptors" and not "cache lines"
         */
-       rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+       rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
 
        ew32(RXDCTL(0), rxdctl);
        /* momentarily enable the RX ring for the changes to take effect */
@@ -3884,6 +3873,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
                e1000_flush_rx_ring(adapter);
 }
 
+/**
+ * e1000e_systim_reset - reset the timesync registers after a hardware reset
+ * @adapter: board private structure
+ *
+ * When the MAC is reset, all hardware bits for timesync will be reset to the
+ * default values. This function will restore the settings last in place.
+ * Since the clock SYSTIME registers are reset, we will simply restore the
+ * cyclecounter to the kernel real clock time.
+ **/
+static void e1000e_systim_reset(struct e1000_adapter *adapter)
+{
+       struct ptp_clock_info *info = &adapter->ptp_clock_info;
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned long flags;
+       u32 timinca;
+       s32 ret_val;
+
+       if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+               return;
+
+       if (info->adjfreq) {
+               /* restore the previous ptp frequency delta */
+               ret_val = info->adjfreq(info, adapter->ptp_delta);
+       } else {
+               /* set the default base frequency if no adjustment possible */
+               ret_val = e1000e_get_base_timinca(adapter, &timinca);
+               if (!ret_val)
+                       ew32(TIMINCA, timinca);
+       }
+
+       if (ret_val) {
+               dev_warn(&adapter->pdev->dev,
+                        "Failed to restore TIMINCA clock rate delta: %d\n",
+                        ret_val);
+               return;
+       }
+
+       /* reset the systim ns time counter */
+       spin_lock_irqsave(&adapter->systim_lock, flags);
+       timecounter_init(&adapter->tc, &adapter->cc,
+                        ktime_to_ns(ktime_get_real()));
+       spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+       /* restore the previous hwtstamp configuration settings */
+       e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+}
+
 /**
  * e1000e_reset - bring the hardware into a known good state
  *
@@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
        e1000e_reset_adaptive(hw);
 
-       /* initialize systim and reset the ns time counter */
-       e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+       /* restore systim and hwtstamp settings */
+       e1000e_systim_reset(adapter);
 
        /* Set EEE advertisement as appropriate */
        if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -4275,7 +4311,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
        struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
                                                     cc);
        struct e1000_hw *hw = &adapter->hw;
-       u32 systimel_1, systimel_2, systimeh;
+       u32 systimel, systimeh;
        cycle_t systim, systim_next;
        /* SYSTIMH latching upon SYSTIML read does not work well.
         * This means that if SYSTIML overflows after we read it but before
@@ -4283,24 +4319,25 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
         * will experience a huge non linear increment in the systime value
         * to fix that we test for overflow and if true, we re-read systime.
         */
-       systimel_1 = er32(SYSTIML);
+       systimel = er32(SYSTIML);
        systimeh = er32(SYSTIMH);
-       systimel_2 = er32(SYSTIML);
-       /* Check for overflow. If there was no overflow, use the values */
-       if (systimel_1 < systimel_2) {
-               systim = (cycle_t)systimel_1;
-               systim |= (cycle_t)systimeh << 32;
-       } else {
-               /* There was an overflow, read again SYSTIMH, and use
-                * systimel_2
-                */
-               systimeh = er32(SYSTIMH);
-               systim = (cycle_t)systimel_2;
-               systim |= (cycle_t)systimeh << 32;
+       /* Is systimel is so large that overflow is possible? */
+       if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
+               u32 systimel_2 = er32(SYSTIML);
+               if (systimel > systimel_2) {
+                       /* There was an overflow, read again SYSTIMH, and use
+                        * systimel_2
+                        */
+                       systimeh = er32(SYSTIMH);
+                       systimel = systimel_2;
+               }
        }
+       systim = (cycle_t)systimel;
+       systim |= (cycle_t)systimeh << 32;
 
        if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-               u64 incvalue, time_delta, rem, temp;
+               u64 time_delta, rem, temp;
+               u32 incvalue;
                int i;
 
                /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
@@ -6861,7 +6898,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
 
        ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
        le16_to_cpus(&buf);
-       if (!ret_val && (!(buf & (1 << 0)))) {
+       if (!ret_val && (!(buf & BIT(0)))) {
                /* Deep Smart Power Down (DSPD) */
                dev_warn(&adapter->pdev->dev,
                         "Warning: detected DSPD enabled in EEPROM\n");
@@ -6965,7 +7002,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int bars, i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
-       s32 rval = 0;
+       s32 ret_val = 0;
 
        if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
                aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -7200,18 +7237,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
                    (adapter->hw.bus.func == 1))
-                       rval = e1000_read_nvm(&adapter->hw,
+                       ret_val = e1000_read_nvm(&adapter->hw,
                                              NVM_INIT_CONTROL3_PORT_B,
                                              1, &eeprom_data);
                else
-                       rval = e1000_read_nvm(&adapter->hw,
+                       ret_val = e1000_read_nvm(&adapter->hw,
                                              NVM_INIT_CONTROL3_PORT_A,
                                              1, &eeprom_data);
        }
 
        /* fetch WoL from EEPROM */
-       if (rval)
-               e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+       if (ret_val)
+               e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
        else if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;
 
@@ -7231,13 +7268,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                device_wakeup_enable(&pdev->dev);
 
        /* save off EEPROM version number */
-       rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+       ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
 
-       if (rval) {
-               e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+       if (ret_val) {
+               e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
                adapter->eeprom_vers = 0;
        }
 
+       /* init PTP hardware clock */
+       e1000e_ptp_init(adapter);
+
        /* reset the hardware with the new settings */
        e1000e_reset(adapter);
 
@@ -7256,9 +7296,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
-       /* init PTP hardware clock */
-       e1000e_ptp_init(adapter);
-
        e1000_print_device_info(adapter);
 
        if (pci_dev_run_wake(pdev))
index 49f205c023bfc9025fe06c857a85d80ffaf94a80..2efd80dfd88e8dfbbc942fc12d926ab8e4718084 100644 (file)
@@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
        u32 eecd = er32(EECD);
        u32 mask;
 
-       mask = 0x01 << (count - 1);
+       mask = BIT(count - 1);
        if (nvm->type == e1000_nvm_eeprom_spi)
                eecd |= E1000_EECD_DO;
 
index de13aeacae97c85b34bffe28720a584d6ccbc709..d78d47b41a716e2cb5cbcf2843d923fa150d8e3b 100644 (file)
@@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
                if ((hw->phy.type == e1000_phy_82578) &&
                    (hw->phy.revision >= 1) &&
                    (hw->phy.addr == 2) &&
-                   !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+                   !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) {
                        u16 data2 = 0x7EFF;
 
                        ret_val = e1000_access_phy_debug_regs_hv(hw,
-                                                                (1 << 6) | 0x3,
+                                                                BIT(6) | 0x3,
                                                                 &data2, false);
                        if (ret_val)
                                goto out;
index 55bfe473514da920177f49fc7c0660445fb31598..3027f63ee793c37a4f802bc288e009fc5f4814f2 100644 (file)
@@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define BM_WUC_DATA_OPCODE             0x12
 #define BM_WUC_ENABLE_PAGE             BM_PORT_CTRL_PAGE
 #define BM_WUC_ENABLE_REG              17
-#define BM_WUC_ENABLE_BIT              (1 << 2)
-#define BM_WUC_HOST_WU_BIT             (1 << 4)
-#define BM_WUC_ME_WU_BIT               (1 << 5)
+#define BM_WUC_ENABLE_BIT              BIT(2)
+#define BM_WUC_HOST_WU_BIT             BIT(4)
+#define BM_WUC_ME_WU_BIT               BIT(5)
 
 #define PHY_UPPER_SHIFT                        21
 #define BM_PHY_REG(page, reg) \
@@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define I82578_ADDR_REG                        29
 #define I82577_ADDR_REG                        16
 #define I82577_CFG_REG                 22
-#define I82577_CFG_ASSERT_CRS_ON_TX    (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT    (3 << 10)       /* auto downshift */
+#define I82577_CFG_ASSERT_CRS_ON_TX    BIT(15)
+#define I82577_CFG_ENABLE_DOWNSHIFT    (3u << 10)      /* auto downshift */
 #define I82577_CTRL_REG                        23
 
 /* 82577 specific PHY registers */
index e2ff3ef75d5d66e97d5b67229764f9a5afc2c7ac..2e1b17ad52a3e55984fb8303ab191bb636aa8fc8 100644 (file)
@@ -79,6 +79,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 
        ew32(TIMINCA, timinca);
 
+       adapter->ptp_delta = delta;
+
        spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
        return 0;
index 2a6a5d3dd874e713e451f6347cdb2e34fcae98ef..9c44739da5e28f6352b4f8ae833d95517cbcd014 100644 (file)
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 16)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG      BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG  BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR         BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS      BIT(3)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT   BIT(5)
+#define        I40E_PRIV_FLAGS_MFP_FLAG                BIT(0)
+#define        I40E_PRIV_FLAGS_LINKPOLL_FLAG           BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR                 BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS              BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT           BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT   BIT(5)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_OEM_VER_PATCH_MASK    0xff
 #define I40E_OEM_VER_BUILD_SHIFT   8
 #define I40E_OEM_VER_SHIFT         24
-#define I40E_PHY_DEBUG_PORT        BIT(4)
+#define I40E_PHY_DEBUG_ALL \
+       (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+       I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -356,6 +359,7 @@ struct i40e_pf {
 #define I40E_FLAG_STOP_FW_LLDP                 BIT_ULL(47)
 #define I40E_FLAG_HAVE_10GBASET_PHY            BIT_ULL(48)
 #define I40E_FLAG_PF_MAC                       BIT_ULL(50)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT         BIT_ULL(51)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
index eacbe7430b4833279aa81c969d9f581c842deffb..11cf1a5ebccf0773fc281b0be6929c8f47cec9f7 100644 (file)
@@ -1833,7 +1833,10 @@ struct i40e_aqc_set_phy_debug {
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE  0x00
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD  0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT  0x02
+/* Disable link manageability on a single port */
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW      0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW  0x20
        u8      reserved[15];
 };
 
index 4a934e14574d4de7786dd06052f847b87a6c9ee8..422b41d61c9a59f9c1f506487c0dd0f883b55f50 100644 (file)
@@ -696,7 +696,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
        /* Non Tunneled IPv6 */
        I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
        I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
-       I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+       I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
        I40E_PTT_UNUSED_ENTRY(91),
        I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
        I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1972,10 +1972,12 @@ aq_add_vsi_exit:
  * @seid: vsi number
  * @set: set unicast promiscuous enable/disable
  * @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
  **/
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
                                u16 seid, bool set,
-                               struct i40e_asq_cmd_details *cmd_details)
+                               struct i40e_asq_cmd_details *cmd_details,
+                               bool rx_only_promisc)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1988,8 +1990,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 
        if (set) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-               if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
-                   (hw->aq.api_maj_ver > 1))
+               if (rx_only_promisc &&
+                   (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+                    (hw->aq.api_maj_ver > 1)))
                        flags |= I40E_AQC_SET_VSI_PROMISC_TX;
        }
 
@@ -2282,6 +2285,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+                                               u16 flags,
+                                               u16 valid_flags,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_switch_config *scfg =
+               (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+       enum i40e_status_code status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_switch_config);
+       scfg->flags = cpu_to_le16(flags);
+       scfg->valid_flags = cpu_to_le16(valid_flags);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_aq_get_firmware_version
  * @hw: pointer to the hw struct
index 51a994d858708d455a2f3062c822588e51427957..5e8d84ff7d5f576b275ec060414fa9aa2a9d2ed2 100644 (file)
@@ -230,6 +230,17 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+       "MFP",
+       "LinkPolling",
+       "flow-director-atr",
+       "veb-stats",
+       "hw-atr-eviction",
+       "vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
        "NPAR",
        "LinkPolling",
@@ -250,6 +261,110 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
                 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
 }
 
+/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @phy_types: PHY types to convert
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+                                    u32 *advertising)
+{
+       enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
+
+       *supported = 0x0;
+       *advertising = 0x0;
+
+       if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+               *supported |= SUPPORTED_Autoneg |
+                             SUPPORTED_1000baseT_Full;
+               *advertising |= ADVERTISED_Autoneg |
+                               ADVERTISED_1000baseT_Full;
+               if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+                       *supported |= SUPPORTED_100baseT_Full;
+                       *advertising |= ADVERTISED_100baseT_Full;
+               }
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XFI ||
+           phy_types & I40E_CAP_PHY_TYPE_SFI ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+               *supported |= SUPPORTED_10000baseT_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+               *supported |= SUPPORTED_Autoneg |
+                             SUPPORTED_10000baseT_Full;
+               *advertising |= ADVERTISED_Autoneg |
+                               ADVERTISED_10000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+               *supported |= SUPPORTED_40000baseCR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+               *supported |= SUPPORTED_Autoneg |
+                             SUPPORTED_40000baseCR4_Full;
+               *advertising |= ADVERTISED_Autoneg |
+                               ADVERTISED_40000baseCR4_Full;
+       }
+       if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+           !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+               *supported |= SUPPORTED_Autoneg |
+                             SUPPORTED_100baseT_Full;
+               *advertising |= ADVERTISED_Autoneg |
+                               ADVERTISED_100baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+               *supported |= SUPPORTED_Autoneg |
+                             SUPPORTED_1000baseT_Full;
+               *advertising |= ADVERTISED_Autoneg |
+                               ADVERTISED_1000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+               *supported |= SUPPORTED_40000baseSR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+               *supported |= SUPPORTED_40000baseLR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+               *supported |= SUPPORTED_40000baseKR4_Full |
+                             SUPPORTED_Autoneg;
+               *advertising |= ADVERTISED_40000baseKR4_Full |
+                               ADVERTISED_Autoneg;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+               *supported |= SUPPORTED_20000baseKR2_Full |
+                             SUPPORTED_Autoneg;
+               *advertising |= ADVERTISED_20000baseKR2_Full |
+                               ADVERTISED_Autoneg;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+               *supported |= SUPPORTED_10000baseKR_Full |
+                             SUPPORTED_Autoneg;
+               *advertising |= ADVERTISED_10000baseKR_Full |
+                               ADVERTISED_Autoneg;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+               *supported |= SUPPORTED_10000baseKX4_Full |
+                             SUPPORTED_Autoneg;
+               *advertising |= ADVERTISED_10000baseKX4_Full |
+                               ADVERTISED_Autoneg;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+               *supported |= SUPPORTED_1000baseKX_Full |
+                             SUPPORTED_Autoneg;
+               *advertising |= ADVERTISED_1000baseKX_Full |
+                               ADVERTISED_Autoneg;
+       }
+}
+
 /**
  * i40e_get_settings_link_up - Get the Link settings for when link is up
  * @hw: hw structure
@@ -264,6 +379,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 {
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        u32 link_speed = hw_link_info->link_speed;
+       u32 e_advertising = 0x0;
+       u32 e_supported = 0x0;
 
        /* Initialize supported and advertised settings based on phy settings */
        switch (hw_link_info->phy_type) {
@@ -304,21 +421,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                break;
        case I40E_PHY_TYPE_10GBASE_T:
        case I40E_PHY_TYPE_1000BASE_T:
+       case I40E_PHY_TYPE_100BASE_TX:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full;
+                                 SUPPORTED_1000baseT_Full |
+                                 SUPPORTED_100baseT_Full;
                ecmd->advertising = ADVERTISED_Autoneg;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               /* adding 100baseT support for 10GBASET_PHY */
-               if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
-                       ecmd->supported |= SUPPORTED_100baseT_Full;
-                       ecmd->advertising |= ADVERTISED_100baseT_Full |
-                                            ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_10000baseT_Full;
-               }
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
                break;
        case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
                ecmd->supported = SUPPORTED_Autoneg |
@@ -326,21 +440,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                ecmd->advertising = ADVERTISED_Autoneg |
                                    ADVERTISED_1000baseT_Full;
                break;
-       case I40E_PHY_TYPE_100BASE_TX:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_100baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
-               /* firmware detects 10G phy as 100M phy at 100M speed */
-               if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
-                       ecmd->supported |= SUPPORTED_10000baseT_Full |
-                                          SUPPORTED_1000baseT_Full;
-                       ecmd->advertising |= ADVERTISED_Autoneg |
-                                            ADVERTISED_100baseT_Full |
-                                            ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_10000baseT_Full;
-               }
-               break;
        case I40E_PHY_TYPE_10GBASE_CR1_CU:
        case I40E_PHY_TYPE_10GBASE_CR1:
                ecmd->supported = SUPPORTED_Autoneg |
@@ -367,14 +466,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                ecmd->advertising |= ADVERTISED_100baseT_Full;
                }
                break;
-       /* Backplane is set based on supported phy types in get_settings
-        * so don't set anything here but don't warn either
-        */
        case I40E_PHY_TYPE_40GBASE_KR4:
        case I40E_PHY_TYPE_20GBASE_KR2:
        case I40E_PHY_TYPE_10GBASE_KR:
        case I40E_PHY_TYPE_10GBASE_KX4:
        case I40E_PHY_TYPE_1000BASE_KX:
+               ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+                                  SUPPORTED_20000baseKR2_Full |
+                                  SUPPORTED_10000baseKR_Full |
+                                  SUPPORTED_10000baseKX4_Full |
+                                  SUPPORTED_1000baseKX_Full |
+                                  SUPPORTED_Autoneg;
+               ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+                                    ADVERTISED_20000baseKR2_Full |
+                                    ADVERTISED_10000baseKR_Full |
+                                    ADVERTISED_10000baseKX4_Full |
+                                    ADVERTISED_1000baseKX_Full |
+                                    ADVERTISED_Autoneg;
                break;
        default:
                /* if we got here and link is up something bad is afoot */
@@ -382,6 +490,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                            hw_link_info->phy_type);
        }
 
+       /* Now that we've worked out everything that could be supported by the
+        * current PHY type, get what is supported by the NVM and then AND
+        * them together to get what is truly supported
+        */
+       i40e_phy_type_to_ethtool(pf, &e_supported,
+                                &e_advertising);
+
+       ecmd->supported = ecmd->supported & e_supported;
+       ecmd->advertising = ecmd->advertising & e_advertising;
+
        /* Set speed and duplex */
        switch (link_speed) {
        case I40E_LINK_SPEED_40GB:
@@ -416,74 +534,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
                                        struct ethtool_cmd *ecmd,
                                        struct i40e_pf *pf)
 {
-       enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
        /* link is down and the driver needs to fall back on
         * supported phy types to figure out what info to display
         */
-       ecmd->supported = 0x0;
-       ecmd->advertising = 0x0;
-       if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
-               ecmd->supported |= SUPPORTED_Autoneg |
-                                  SUPPORTED_1000baseT_Full;
-               ecmd->advertising |= ADVERTISED_Autoneg |
-                                    ADVERTISED_1000baseT_Full;
-               if (pf->hw.mac.type == I40E_MAC_X722) {
-                       ecmd->supported |= SUPPORTED_100baseT_Full;
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
-                       if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
-                               ecmd->supported |= SUPPORTED_100baseT_Full;
-                               ecmd->advertising |= ADVERTISED_100baseT_Full;
-                       }
-               }
-       }
-       if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
-           phy_types & I40E_CAP_PHY_TYPE_XFI ||
-           phy_types & I40E_CAP_PHY_TYPE_SFI ||
-           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
-           phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
-               ecmd->supported |= SUPPORTED_10000baseT_Full;
-       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
-           phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
-           phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
-           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
-           phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
-               ecmd->supported |= SUPPORTED_Autoneg |
-                                  SUPPORTED_10000baseT_Full;
-               ecmd->advertising |= ADVERTISED_Autoneg |
-                                    ADVERTISED_10000baseT_Full;
-       }
-       if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
-           phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
-           phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
-               ecmd->supported |= SUPPORTED_40000baseCR4_Full;
-       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
-           phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
-               ecmd->supported |= SUPPORTED_Autoneg |
-                                 SUPPORTED_40000baseCR4_Full;
-               ecmd->advertising |= ADVERTISED_Autoneg |
-                                   ADVERTISED_40000baseCR4_Full;
-       }
-       if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
-           !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
-               ecmd->supported |= SUPPORTED_Autoneg |
-                                  SUPPORTED_100baseT_Full;
-               ecmd->advertising |= ADVERTISED_Autoneg |
-                                    ADVERTISED_100baseT_Full;
-       }
-       if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
-           phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
-           phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
-           phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
-               ecmd->supported |= SUPPORTED_Autoneg |
-                                  SUPPORTED_1000baseT_Full;
-               ecmd->advertising |= ADVERTISED_Autoneg |
-                                    ADVERTISED_1000baseT_Full;
-       }
-       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
-               ecmd->supported |= SUPPORTED_40000baseSR4_Full;
-       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
-               ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+       i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+                                &ecmd->advertising);
 
        /* With no link speed and duplex are unknown */
        ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -512,38 +567,6 @@ static int i40e_get_settings(struct net_device *netdev,
                i40e_get_settings_link_down(hw, ecmd, pf);
 
        /* Now set the settings that don't rely on link being up/down */
-
-       /* For backplane, supported and advertised are only reliant on the
-        * phy types the NVM specifies are supported.
-        */
-       if (hw->device_id == I40E_DEV_ID_KX_B ||
-           hw->device_id == I40E_DEV_ID_KX_C ||
-           hw->device_id == I40E_DEV_ID_20G_KR2 ||
-           hw->device_id ==  I40E_DEV_ID_20G_KR2_A) {
-               ecmd->supported = SUPPORTED_Autoneg;
-               ecmd->advertising = ADVERTISED_Autoneg;
-               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
-                       ecmd->supported |= SUPPORTED_40000baseKR4_Full;
-                       ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
-               }
-               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
-                       ecmd->supported |= SUPPORTED_20000baseKR2_Full;
-                       ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
-               }
-               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
-                       ecmd->supported |= SUPPORTED_10000baseKR_Full;
-                       ecmd->advertising |= ADVERTISED_10000baseKR_Full;
-               }
-               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
-                       ecmd->supported |= SUPPORTED_10000baseKX4_Full;
-                       ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
-               }
-               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
-                       ecmd->supported |= SUPPORTED_1000baseKX_Full;
-                       ecmd->advertising |= ADVERTISED_1000baseKX_Full;
-               }
-       }
-
        /* Set autoneg settings */
        ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -1158,6 +1181,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
+       if (pf->hw.pf_id == 0)
+               drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+       else
+               drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 }
 
 static void i40e_get_ringparam(struct net_device *netdev,
@@ -1385,7 +1412,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
                        return I40E_VSI_STATS_LEN(netdev);
                }
        case ETH_SS_PRIV_FLAGS:
-               return I40E_PRIV_FLAGS_STR_LEN;
+               if (pf->hw.pf_id == 0)
+                       return I40E_PRIV_FLAGS_GL_STR_LEN;
+               else
+                       return I40E_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
@@ -1583,10 +1613,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
                break;
        case ETH_SS_PRIV_FLAGS:
-               for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
-                       memcpy(data, i40e_priv_flags_strings[i],
-                              ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
+               if (pf->hw.pf_id == 0) {
+                       for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+                               memcpy(data, i40e_priv_flags_strings_gl[i],
+                                      ETH_GSTRING_LEN);
+                               data += ETH_GSTRING_LEN;
+                       }
+               } else {
+                       for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+                               memcpy(data, i40e_priv_flags_strings[i],
+                                      ETH_GSTRING_LEN);
+                               data += ETH_GSTRING_LEN;
+                       }
                }
                break;
        default:
@@ -1880,7 +1918,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
                if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
                        pf->led_status = i40e_led_get(hw);
                } else {
-                       i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+                       i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
                        ret = i40e_led_get_phy(hw, &temp_status,
                                               &pf->phy_led_val);
                        pf->led_status = temp_status;
@@ -2848,8 +2886,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
        struct i40e_pf *pf = vsi->back;
        u32 ret_flags = 0;
 
-       ret_flags |= pf->hw.func_caps.npar_enable ?
-               I40E_PRIV_FLAGS_NPAR_FLAG : 0;
        ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
                I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
        ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
@@ -2858,6 +2894,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
                I40E_PRIV_FLAGS_VEB_STATS : 0;
        ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
                0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+       if (pf->hw.pf_id == 0) {
+               ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+                       I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+       }
 
        return ret_flags;
 }
@@ -2872,7 +2912,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
+       u16 sw_flags = 0, valid_flags = 0;
        bool reset_required = false;
+       bool promisc_change = false;
+       int ret;
 
        /* NOTE: MFP is not settable */
 
@@ -2902,6 +2945,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
                reset_required = true;
        }
 
+       if (pf->hw.pf_id == 0) {
+               if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+                   !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+                       pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+                       promisc_change = true;
+               } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+                          (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+                       pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+                       promisc_change = true;
+               }
+       }
+       if (promisc_change) {
+               if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+                       sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+               valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+               ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+                                               NULL);
+               if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+                       dev_info(&pf->pdev->dev,
+                                "couldn't set switch config bits, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       /* not a fatal problem, just keep going */
+               }
+       }
+
        if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
            (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
                pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
index 5ebe12d56ebf422b6273d0e810be8bbf113ced04..a7c7b1d9b7c81851d10b0de02cde2a7fc635a128 100644 (file)
@@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
        struct i40e_hmc_sd_entry *sd_entry;
        bool dma_mem_alloc_done = false;
        struct i40e_dma_mem mem;
-       i40e_status ret_code;
+       i40e_status ret_code = I40E_SUCCESS;
        u64 alloc_len;
 
        if (NULL == hmc_info->sd_table.sd_entry) {
index 46a3a674c635b98ba4aaf1dc038279401df08f0d..1cd0ebf7520ac77079f42342c4b868a1608d603f 100644 (file)
@@ -46,7 +46,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 16
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -2128,7 +2128,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                        aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
                                                          &vsi->back->hw,
                                                          vsi->seid,
-                                                         cur_promisc, NULL);
+                                                         cur_promisc, NULL,
+                                                         true);
                        if (aq_ret) {
                                retval =
                                i40e_aq_rc_to_posix(aq_ret,
@@ -9361,7 +9362,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ctxt.info.valid_sections |=
                                cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
                        ctxt.info.queueing_opt_flags |=
-                                               I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+                               (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+                                I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
                }
 
                ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -10407,6 +10409,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
+       u16 flags = 0;
        int ret;
 
        /* find out what's out there already */
@@ -10420,6 +10423,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        }
        i40e_pf_reset_stats(pf);
 
+       /* set the switch config bit for the whole device to
+        * support limited promisc or true promisc
+        * when user requests promisc. The default is limited
+        * promisc.
+        */
+
+       if ((pf->hw.pf_id == 0) &&
+           !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+               flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+       if (pf->hw.pf_id == 0) {
+               u16 valid_flags;
+
+               valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+               ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+                                               NULL);
+               if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+                       dev_info(&pf->pdev->dev,
+                                "couldn't set switch config bits, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       /* not a fatal problem, just keep going */
+               }
+       }
+
        /* first time setup */
        if (pf->lan_vsi == I40E_NO_VSI || reinit) {
                struct i40e_vsi *vsi = NULL;
index 4c8977c805dfa6af54e14e09877b7d7708be63d0..80403c6ee7f073fccd2211ba3eec4e847014519e 100644 (file)
@@ -130,7 +130,8 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
                                u16 vsi_id, bool set_filter,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
-               u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+               u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+               bool rx_only_promisc);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
@@ -182,6 +183,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
                                struct i40e_aqc_get_switch_config_resp *buf,
                                u16 buf_size, u16 *start_seid,
                                struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+                                               u16 flags,
+                                               u16 valid_flags,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
                                enum i40e_aq_resources_ids resource,
                                enum i40e_aq_resource_access_type access,
index a1b878abd5b067489f431e0356b120b405e723bf..ed39cbad24bdd2c728c4d54817f31ac664c13a07 100644 (file)
@@ -289,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
                rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
                pf->last_rx_ptp_check = jiffies;
                pf->rx_hwtstamp_cleared++;
-               dev_warn(&vsi->back->pdev->dev,
-                        "%s: clearing Rx timestamp hang\n",
-                        __func__);
+               WARN_ONCE(1, "Detected Rx timestamp register hang\n");
        }
 }
 
index b0edffe88492d1e6372bf7187032081abc4acdb0..99a524db5560cbcb2ffb92505f371532336d8d19 100644 (file)
@@ -1394,7 +1394,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
-       if (ring->netdev->features & NETIF_F_RXHASH)
+       if (!(ring->netdev->features & NETIF_F_RXHASH))
                return;
 
        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
index a9b04e72df82142138a606970fcf6c5a9e1f7d93..1fcafcfa8f14fe5a6fe0e693715bb06cdc7023aa 100644 (file)
@@ -665,8 +665,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                goto error_alloc_vsi_res;
        }
        if (type == I40E_VSI_SRIOV) {
-               u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
                vf->lan_vsi_idx = vsi->idx;
                vf->lan_vsi_id = vsi->id;
                /* If the port VLAN has been configured and then the
@@ -688,12 +686,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                                         "Could not add MAC filter %pM for VF %d\n",
                                        vf->default_lan_addr.addr, vf->vf_id);
                }
-               f = i40e_add_filter(vsi, brdcast,
-                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
-                                   true, false);
-               if (!f)
-                       dev_info(&pf->pdev->dev,
-                                "Could not allocate VF broadcast filter\n");
                spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
@@ -1474,12 +1466,16 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 
        vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+       if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
                dev_err(&pf->pdev->dev,
-                       "VF %d doesn't meet requirements to enter promiscuous mode\n",
+                       "Unprivileged VF %d is attempting to configure promiscuous mode\n",
                        vf->vf_id);
-               aq_ret = I40E_ERR_PARAM;
+               /* Lie to the VF on purpose. */
+               aq_ret = 0;
                goto error_param;
        }
        /* Multicast promiscuous handling*/
@@ -1562,7 +1558,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
                }
        } else {
                aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
-                                                            allmulti, NULL);
+                                                            allmulti, NULL,
+                                                            true);
                aq_err = pf->hw.aq.asq_last_status;
                if (aq_ret)
                        dev_err(&pf->pdev->dev,
index fa044a904208ce74a176fac36c9792b67504006a..76ed97db28e2e883f51a7ca5fc9e225a31d8396f 100644 (file)
@@ -215,6 +215,7 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE     BIT(12)
 #define I40EVF_FLAG_ADDR_SET_BY_PF             BIT(13)
 #define I40EVF_FLAG_PROMISC_ON                 BIT(15)
+#define I40EVF_FLAG_ALLMULTI_ON                        BIT(16)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
@@ -241,6 +242,8 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_SET_RSS_LUT             BIT(14)
 #define I40EVF_FLAG_AQ_REQUEST_PROMISC         BIT(15)
 #define I40EVF_FLAG_AQ_RELEASE_PROMISC         BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI                BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI                BIT(18)
 
        /* OS defined structs */
        struct net_device *netdev;
index b548dbe78cd3439bc0293c27283ee8a072536e2c..642bb45ed9067bac11d7ee9469f10e44b29b42e1 100644 (file)
@@ -934,6 +934,13 @@ bottom_of_search_loop:
                 adapter->flags & I40EVF_FLAG_PROMISC_ON)
                adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
 
+       if (netdev->flags & IFF_ALLMULTI &&
+           !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+               adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+       else if (!(netdev->flags & IFF_ALLMULTI) &&
+                adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+               adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
@@ -1612,7 +1619,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto watchdog_done;
        }
 
-       if (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) {
+       if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+               i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+               goto watchdog_done;
+       }
+
+       if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+           (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
                i40evf_set_promiscuous(adapter, 0);
                goto watchdog_done;
        }
index c5d33a2cea877752cb7c7b371b9299fdd3760f88..f13445691507c2b7ea668f402cbb04e6f65cf38c 100644 (file)
@@ -641,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
 {
        struct i40e_virtchnl_promisc_info vpi;
+       int promisc_all;
 
        if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
@@ -649,11 +650,21 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
                return;
        }
 
-       if (flags) {
+       promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
+                     I40E_FLAG_VF_MULTICAST_PROMISC;
+       if ((flags & promisc_all) == promisc_all) {
                adapter->flags |= I40EVF_FLAG_PROMISC_ON;
                adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
                dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
-       } else {
+       }
+
+       if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+               adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+               adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+               dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+       }
+
+       if (!flags) {
                adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
                adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
                dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
index a23aa6704394b46fd3d7530f6ea8fadd48dd431b..a61447fd778eb579ef0b31d1a0d4c33a9e5386ee 100644 (file)
@@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
        if (size > 15)
                size = 15;
 
-       nvm->word_size = 1 << size;
+       nvm->word_size = BIT(size);
        nvm->opcode_bits = 8;
        nvm->delay_usec = 1;
 
@@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
                                    16 : 8;
                break;
        }
-       if (nvm->word_size == (1 << 15))
+       if (nvm->word_size == BIT(15))
                nvm->page_size = 128;
 
        nvm->type = e1000_nvm_eeprom_spi;
@@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
        nvm->ops.write = igb_write_nvm_spi;
        nvm->ops.validate = igb_validate_nvm_checksum;
        nvm->ops.update = igb_update_nvm_checksum;
-       if (nvm->word_size < (1 << 15))
+       if (nvm->word_size < BIT(15))
                nvm->ops.read = igb_read_nvm_eerd;
        else
                nvm->ops.read = igb_read_nvm_spi;
@@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
                /* The PF can spoof - it has to in order to
                 * support emulation mode NICs
                 */
-               reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+               reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
        } else {
                reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
                             E1000_DTXSWC_VLAN_SPOOF_MASK);
index de8805a2a2feff757c21099ccc52223b98c0c83b..199ff98209cfd00c4714e0e5e5c57e30949cb38f 100644 (file)
@@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_CTRL_DCA_MODE_CB2     0x02 /* DCA Mode CB2 */
 
 #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
 
 #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 
 /* Additional DCA related definitions, note change in position of CPUID */
 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
@@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
 
 /* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
-#define E1000_ETQF_1588            (1 << 30)
+#define E1000_ETQF_FILTER_ENABLE   BIT(26)
+#define E1000_ETQF_1588            BIT(30)
 
 /* FTQF register bit definitions */
 #define E1000_FTQF_VF_BP               0x00008000
@@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
 #define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
 #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31)  /* global VF LB enable */
 
 /* Easy defines for setting default pool, would normally be left a zero */
 #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
 #define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
 
 /* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC         BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN         BIT(30)
 
 /* Per VM Offload register setup */
 #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
@@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DTXCTL_MDP_EN     0x0020
 #define E1000_DTXCTL_SPOOF_INT  0x0040
 
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT   (1 << 14)
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT   BIT(14)
 
 #define ALL_QUEUES   0xFFFF
 
index e9f23ee8f15efa76950fd21a3fdd52950a139663..2997c443c5dc831dc3641e76154b3e30c290bf28 100644 (file)
 
 /* Time Sync Interrupt Cause/Mask Register Bits */
 
-#define TSINTR_SYS_WRAP  (1 << 0) /* SYSTIM Wrap around. */
-#define TSINTR_TXTS      (1 << 1) /* Transmit Timestamp. */
-#define TSINTR_RXTS      (1 << 2) /* Receive Timestamp. */
-#define TSINTR_TT0       (1 << 3) /* Target Time 0 Trigger. */
-#define TSINTR_TT1       (1 << 4) /* Target Time 1 Trigger. */
-#define TSINTR_AUTT0     (1 << 5) /* Auxiliary Timestamp 0 Taken. */
-#define TSINTR_AUTT1     (1 << 6) /* Auxiliary Timestamp 1 Taken. */
-#define TSINTR_TADJ      (1 << 7) /* Time Adjust Done. */
+#define TSINTR_SYS_WRAP  BIT(0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS      BIT(1) /* Transmit Timestamp. */
+#define TSINTR_RXTS      BIT(2) /* Receive Timestamp. */
+#define TSINTR_TT0       BIT(3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1       BIT(4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0     BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1     BIT(6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ      BIT(7) /* Time Adjust Done. */
 
 #define TSYNC_INTERRUPTS TSINTR_TXTS
 #define E1000_TSICR_TXTS TSINTR_TXTS
 
 /* TSAUXC Configuration Bits */
-#define TSAUXC_EN_TT0    (1 << 0)  /* Enable target time 0. */
-#define TSAUXC_EN_TT1    (1 << 1)  /* Enable target time 1. */
-#define TSAUXC_EN_CLK0   (1 << 2)  /* Enable Configurable Frequency Clock 0. */
-#define TSAUXC_SAMP_AUT0 (1 << 3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
-#define TSAUXC_ST0       (1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
-#define TSAUXC_EN_CLK1   (1 << 5)  /* Enable Configurable Frequency Clock 1. */
-#define TSAUXC_SAMP_AUT1 (1 << 6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
-#define TSAUXC_ST1       (1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
-#define TSAUXC_EN_TS0    (1 << 8)  /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT0     (1 << 9)  /* Auxiliary Timestamp Taken. */
-#define TSAUXC_EN_TS1    (1 << 10) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT1     (1 << 11) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_PLSG      (1 << 17) /* Generate a pulse. */
-#define TSAUXC_DISABLE   (1 << 31) /* Disable SYSTIM Count Operation. */
+#define TSAUXC_EN_TT0    BIT(0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1    BIT(1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0   BIT(2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 BIT(3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0       BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1   BIT(5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 BIT(6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1       BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0    BIT(8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0     BIT(9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1    BIT(10) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT1     BIT(11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG      BIT(17) /* Generate a pulse. */
+#define TSAUXC_DISABLE   BIT(31) /* Disable SYSTIM Count Operation. */
 
 /* SDP Configuration Bits */
-#define AUX0_SEL_SDP0    (0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP1    (1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP2    (2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP3    (3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
-#define AUX0_TS_SDP_EN   (1 << 2)  /* Enable auxiliary time stamp trigger 0. */
-#define AUX1_SEL_SDP0    (0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP1    (1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP2    (2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP3    (3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
-#define AUX1_TS_SDP_EN   (1 << 5)  /* Enable auxiliary time stamp trigger 1. */
-#define TS_SDP0_SEL_TT0  (0 << 6)  /* Target time 0 is output on SDP0. */
-#define TS_SDP0_SEL_TT1  (1 << 6)  /* Target time 1 is output on SDP0. */
-#define TS_SDP0_SEL_FC0  (2 << 6)  /* Freq clock  0 is output on SDP0. */
-#define TS_SDP0_SEL_FC1  (3 << 6)  /* Freq clock  1 is output on SDP0. */
-#define TS_SDP0_EN       (1 << 8)  /* SDP0 is assigned to Tsync. */
-#define TS_SDP1_SEL_TT0  (0 << 9)  /* Target time 0 is output on SDP1. */
-#define TS_SDP1_SEL_TT1  (1 << 9)  /* Target time 1 is output on SDP1. */
-#define TS_SDP1_SEL_FC0  (2 << 9)  /* Freq clock  0 is output on SDP1. */
-#define TS_SDP1_SEL_FC1  (3 << 9)  /* Freq clock  1 is output on SDP1. */
-#define TS_SDP1_EN       (1 << 11) /* SDP1 is assigned to Tsync. */
-#define TS_SDP2_SEL_TT0  (0 << 12) /* Target time 0 is output on SDP2. */
-#define TS_SDP2_SEL_TT1  (1 << 12) /* Target time 1 is output on SDP2. */
-#define TS_SDP2_SEL_FC0  (2 << 12) /* Freq clock  0 is output on SDP2. */
-#define TS_SDP2_SEL_FC1  (3 << 12) /* Freq clock  1 is output on SDP2. */
-#define TS_SDP2_EN       (1 << 14) /* SDP2 is assigned to Tsync. */
-#define TS_SDP3_SEL_TT0  (0 << 15) /* Target time 0 is output on SDP3. */
-#define TS_SDP3_SEL_TT1  (1 << 15) /* Target time 1 is output on SDP3. */
-#define TS_SDP3_SEL_FC0  (2 << 15) /* Freq clock  0 is output on SDP3. */
-#define TS_SDP3_SEL_FC1  (3 << 15) /* Freq clock  1 is output on SDP3. */
-#define TS_SDP3_EN       (1 << 17) /* SDP3 is assigned to Tsync. */
+#define AUX0_SEL_SDP0    (0u << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1    (1u << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2    (2u << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3    (3u << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN   (1u << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0    (0u << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1    (1u << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2    (2u << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3    (3u << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN   (1u << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0  (0u << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1  (1u << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0  (2u << 6)  /* Freq clock  0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1  (3u << 6)  /* Freq clock  1 is output on SDP0. */
+#define TS_SDP0_EN       (1u << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0  (0u << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1  (1u << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0  (2u << 9)  /* Freq clock  0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1  (3u << 9)  /* Freq clock  1 is output on SDP1. */
+#define TS_SDP1_EN       (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0  (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1  (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0  (2u << 12) /* Freq clock  0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1  (3u << 12) /* Freq clock  1 is output on SDP2. */
+#define TS_SDP2_EN       (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0  (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1  (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0  (2u << 15) /* Freq clock  0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1  (3u << 15) /* Freq clock  1 is output on SDP3. */
+#define TS_SDP3_EN       (1u << 17) /* SDP3 is assigned to Tsync. */
 
 #define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
 #define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
 #define E1000_M88E1543_FIBER_CTRL      0x0
 #define E1000_EEE_ADV_DEV_I354         7
 #define E1000_EEE_ADV_ADDR_I354                60
-#define E1000_EEE_ADV_100_SUPPORTED    (1 << 1)   /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED   (1 << 2)   /* 1000BaseT EEE Supported */
+#define E1000_EEE_ADV_100_SUPPORTED    BIT(1)   /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED   BIT(2)   /* 1000BaseT EEE Supported */
 #define E1000_PCS_STATUS_DEV_I354      3
 #define E1000_PCS_STATUS_ADDR_I354     1
 #define E1000_PCS_STATUS_TX_LPI_IND    0x0200     /* Tx in LPI state */
index 07cf4fe5833815b4cac586656551782b4649a27c..5010e2232c504dda296b7d19cc5c5f9931f08202 100644 (file)
@@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
         *    bits[4-0]:  which bit in the register
         */
        regidx = vlan / 32;
-       vfta_delta = 1 << (vlan % 32);
+       vfta_delta = BIT(vlan % 32);
        vfta = adapter->shadow_vfta[regidx];
 
        /* vfta_delta represents the difference between the current value
@@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
        bits = rd32(E1000_VLVF(vlvf_index));
 
        /* set the pool bit */
-       bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+       bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
        if (vlan_on)
                goto vlvf_update;
 
        /* clear the pool bit */
-       bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+       bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
 
        if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
                /* Clear VFTA first, then disable VLVF.  Otherwise
@@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
 
        mta = array_rd32(E1000_MTA, hash_reg);
 
-       mta |= (1 << hash_bit);
+       mta |= BIT(hash_bit);
 
        array_wr32(E1000_MTA, hash_reg, mta);
        wrfl();
@@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                hash_bit = hash_value & 0x1F;
 
-               hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+               hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
                mc_addr_list += (ETH_ALEN);
        }
 
index 10f5c9e016a965096b7d02250d340238edd68f48..00e263f0c030f9d7ef86dbdaedc8f2f30473f94d 100644 (file)
@@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
        u32 vflre = rd32(E1000_VFLRE);
        s32 ret_val = -E1000_ERR_MBX;
 
-       if (vflre & (1 << vf_number)) {
+       if (vflre & BIT(vf_number)) {
                ret_val = 0;
-               wr32(E1000_VFLRE, (1 << vf_number));
+               wr32(E1000_VFLRE, BIT(vf_number));
                hw->mbx.stats.rsts++;
        }
 
index e8280d0d7f022942c81d399945259cf4b0a5d1f9..3582c5cf88439713a2cf520aed8e85c1cc5c5ba4 100644 (file)
@@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
        u32 eecd = rd32(E1000_EECD);
        u32 mask;
 
-       mask = 0x01 << (count - 1);
+       mask = 1u << (count - 1);
        if (nvm->type == e1000_nvm_eeprom_spi)
                eecd |= E1000_EECD_DO;
 
index 969a6ddafa3bc294adf1387c8e498ed6d031e157..9b622b33bb5accb6c33f0059c98fc64e9e2f4d8e 100644 (file)
@@ -91,10 +91,10 @@ s32  igb_check_polarity_m88(struct e1000_hw *hw);
 
 #define I82580_ADDR_REG                   16
 #define I82580_CFG_REG                    22
-#define I82580_CFG_ASSERT_CRS_ON_TX       (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82580_CFG_ASSERT_CRS_ON_TX       BIT(15)
+#define I82580_CFG_ENABLE_DOWNSHIFT       (3u << 10) /* auto downshift 100/10 */
 #define I82580_CTRL_REG                   23
-#define I82580_CTRL_DOWNSHIFT_MASK        (7 << 10)
+#define I82580_CTRL_DOWNSHIFT_MASK        (7u << 10)
 
 /* 82580 specific PHY registers */
 #define I82580_PHY_CTRL_2            18
index 9413fa61392f0afc596969f1e9e9003b4245831c..b9609afa5ca35e4163fc5db979075bf82f7f44cb 100644 (file)
@@ -91,6 +91,14 @@ struct igb_adapter;
 #define NVM_COMB_VER_OFF       0x0083
 #define NVM_COMB_VER_PTR       0x003d
 
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGB_I210_TX_LATENCY_10         9542
+#define IGB_I210_TX_LATENCY_100                1024
+#define IGB_I210_TX_LATENCY_1000       178
+#define IGB_I210_RX_LATENCY_10         20662
+#define IGB_I210_RX_LATENCY_100                2213
+#define IGB_I210_RX_LATENCY_1000       448
+
 struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
        u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -169,7 +177,7 @@ enum igb_tx_flags {
  * maintain a power of two alignment we have to limit ourselves to 32K.
  */
 #define IGB_MAX_TXD_PWR        15
-#define IGB_MAX_DATA_PER_TXD   (1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD   (1u << IGB_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
@@ -466,21 +474,21 @@ struct igb_adapter {
        u16 eee_advert;
 };
 
-#define IGB_FLAG_HAS_MSI               (1 << 0)
-#define IGB_FLAG_DCA_ENABLED           (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A           (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS           (1 << 3)
-#define IGB_FLAG_DMAC                  (1 << 4)
-#define IGB_FLAG_PTP                   (1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP    (1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP    (1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED         (1 << 8)
-#define IGB_FLAG_NEED_LINK_UPDATE      (1 << 9)
-#define IGB_FLAG_MEDIA_RESET           (1 << 10)
-#define IGB_FLAG_MAS_CAPABLE           (1 << 11)
-#define IGB_FLAG_MAS_ENABLE            (1 << 12)
-#define IGB_FLAG_HAS_MSIX              (1 << 13)
-#define IGB_FLAG_EEE                   (1 << 14)
+#define IGB_FLAG_HAS_MSI               BIT(0)
+#define IGB_FLAG_DCA_ENABLED           BIT(1)
+#define IGB_FLAG_QUAD_PORT_A           BIT(2)
+#define IGB_FLAG_QUEUE_PAIRS           BIT(3)
+#define IGB_FLAG_DMAC                  BIT(4)
+#define IGB_FLAG_PTP                   BIT(5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP    BIT(6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP    BIT(7)
+#define IGB_FLAG_WOL_SUPPORTED         BIT(8)
+#define IGB_FLAG_NEED_LINK_UPDATE      BIT(9)
+#define IGB_FLAG_MEDIA_RESET           BIT(10)
+#define IGB_FLAG_MAS_CAPABLE           BIT(11)
+#define IGB_FLAG_MAS_ENABLE            BIT(12)
+#define IGB_FLAG_HAS_MSIX              BIT(13)
+#define IGB_FLAG_EEE                   BIT(14)
 #define IGB_FLAG_VLAN_PROMISC          BIT(15)
 
 /* Media Auto Sense */
index bb4d6cdcd0b8b8cf742128eaaac80c40fd3ba07d..64e91c575a39dc87b1dd74e1b2303497e6acd2ee 100644 (file)
@@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev,
 
        memset(p, 0, IGB_REGS_LEN * sizeof(u32));
 
-       regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+       regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 
        /* General Registers */
        regs_buff[0] = rd32(E1000_CTRL);
@@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Test each interrupt */
        for (; i < 31; i++) {
                /* Interrupt to test */
-               mask = 1 << i;
+               mask = BIT(i);
 
                if (!(mask & ics_mask))
                        continue;
@@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev,
                        SOF_TIMESTAMPING_RAW_HARDWARE;
 
                info->tx_types =
-                       (1 << HWTSTAMP_TX_OFF) |
-                       (1 << HWTSTAMP_TX_ON);
+                       BIT(HWTSTAMP_TX_OFF) |
+                       BIT(HWTSTAMP_TX_ON);
 
-               info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+               info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
 
                /* 82576 does not support timestamping all packets. */
                if (adapter->hw.mac.type >= e1000_82580)
-                       info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+                       info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
                else
                        info->rx_filters |=
-                               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+                               BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                               BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                               BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
                return 0;
        default:
index 7460bdbe2e49f78b64c017839226b8be4c8097a2..21727692bef63ed315849d2ca8c7cbb9dd634fa5 100644 (file)
@@ -836,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
                        igb_write_ivar(hw, msix_vector,
                                       tx_queue & 0x7,
                                       ((tx_queue & 0x8) << 1) + 8);
-               q_vector->eims_value = 1 << msix_vector;
+               q_vector->eims_value = BIT(msix_vector);
                break;
        case e1000_82580:
        case e1000_i350:
@@ -857,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
                        igb_write_ivar(hw, msix_vector,
                                       tx_queue >> 1,
                                       ((tx_queue & 0x1) << 4) + 8);
-               q_vector->eims_value = 1 << msix_vector;
+               q_vector->eims_value = BIT(msix_vector);
                break;
        default:
                BUG();
@@ -919,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
                     E1000_GPIE_NSICR);
 
                /* enable msix_other interrupt */
-               adapter->eims_other = 1 << vector;
+               adapter->eims_other = BIT(vector);
                tmp = (vector++ | E1000_IVAR_VALID) << 8;
 
                wr32(E1000_IVAR_MISC, tmp);
@@ -2087,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 }
 
+#define IGB_MAX_MAC_HDR_LEN    127
+#define IGB_MAX_NETWORK_HDR_LEN        511
+
+static netdev_features_t
+igb_features_check(struct sk_buff *skb, struct net_device *dev,
+                  netdev_features_t features)
+{
+       unsigned int network_hdr_len, mac_hdr_len;
+
+       /* Make certain the headers can be described by a context descriptor */
+       mac_hdr_len = skb_network_header(skb) - skb->data;
+       if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+       if (unlikely(network_hdr_len >  IGB_MAX_NETWORK_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       /* We can only support IPV4 TSO in tunnels if we can mangle the
+        * inner IP ID field, so strip TSO if MANGLEID is not supported.
+        */
+       if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+               features &= ~NETIF_F_TSO;
+
+       return features;
+}
+
 static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
@@ -2111,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_fix_features       = igb_fix_features,
        .ndo_set_features       = igb_set_features,
        .ndo_fdb_add            = igb_ndo_fdb_add,
-       .ndo_features_check     = passthru_features_check,
+       .ndo_features_check     = igb_features_check,
 };
 
 /**
@@ -2377,38 +2411,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                            NETIF_F_TSO6 |
                            NETIF_F_RXHASH |
                            NETIF_F_RXCSUM |
-                           NETIF_F_HW_CSUM |
-                           NETIF_F_HW_VLAN_CTAG_RX |
-                           NETIF_F_HW_VLAN_CTAG_TX;
+                           NETIF_F_HW_CSUM;
 
        if (hw->mac.type >= e1000_82576)
                netdev->features |= NETIF_F_SCTP_CRC;
 
+#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                 NETIF_F_GSO_GRE_CSUM | \
+                                 NETIF_F_GSO_IPIP | \
+                                 NETIF_F_GSO_SIT | \
+                                 NETIF_F_GSO_UDP_TUNNEL | \
+                                 NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+       netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
+       netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
+
        /* copy netdev features into list of user selectable features */
-       netdev->hw_features |= netdev->features;
-       netdev->hw_features |= NETIF_F_RXALL;
+       netdev->hw_features |= netdev->features |
+                              NETIF_F_HW_VLAN_CTAG_RX |
+                              NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_RXALL;
 
        if (hw->mac.type >= e1000_i350)
                netdev->hw_features |= NETIF_F_NTUPLE;
 
-       /* set this bit last since it cannot be part of hw_features */
-       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-       netdev->vlan_features |= NETIF_F_SG |
-                                NETIF_F_TSO |
-                                NETIF_F_TSO6 |
-                                NETIF_F_HW_CSUM |
-                                NETIF_F_SCTP_CRC;
+       if (pci_using_dac)
+               netdev->features |= NETIF_F_HIGHDMA;
 
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
        netdev->mpls_features |= NETIF_F_HW_CSUM;
-       netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+       netdev->hw_enc_features |= netdev->vlan_features;
 
-       netdev->priv_flags |= IFF_SUPP_NOFCS;
+       /* set this bit last since it cannot be part of vlan_features */
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_TX;
 
-       if (pci_using_dac) {
-               netdev->features |= NETIF_F_HIGHDMA;
-               netdev->vlan_features |= NETIF_F_HIGHDMA;
-       }
+       netdev->priv_flags |= IFF_SUPP_NOFCS;
 
        netdev->priv_flags |= IFF_UNICAST_FLT;
 
@@ -4064,7 +4103,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
        for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
                u32 vlvf = rd32(E1000_VLVF(i));
 
-               vlvf |= 1 << pf_id;
+               vlvf |= BIT(pf_id);
                wr32(E1000_VLVF(i), vlvf);
        }
 
@@ -4091,7 +4130,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
        /* guarantee that we don't scrub out management VLAN */
        vid = adapter->mng_vlan_id;
        if (vid >= vid_start && vid < vid_end)
-               vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+               vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
        if (!adapter->vfs_allocated_count)
                goto set_vfta;
@@ -4110,7 +4149,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
 
                if (vlvf & E1000_VLVF_VLANID_ENABLE) {
                        /* record VLAN ID in VFTA */
-                       vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+                       vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
                        /* if PF is part of this then continue */
                        if (test_bit(vid, adapter->active_vlans))
@@ -4118,7 +4157,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
                }
 
                /* remove PF from the pool */
-               bits = ~(1 << pf_id);
+               bits = ~BIT(pf_id);
                bits &= rd32(E1000_VLVF(i));
                wr32(E1000_VLVF(i), bits);
        }
@@ -4276,13 +4315,13 @@ static void igb_spoof_check(struct igb_adapter *adapter)
                return;
 
        for (j = 0; j < adapter->vfs_allocated_count; j++) {
-               if (adapter->wvbr & (1 << j) ||
-                   adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+               if (adapter->wvbr & BIT(j) ||
+                   adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
                        dev_warn(&adapter->pdev->dev,
                                "Spoof event(s) detected on VF %d\n", j);
                        adapter->wvbr &=
-                               ~((1 << j) |
-                                 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+                               ~(BIT(j) |
+                                 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
                }
        }
 }
@@ -4842,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring,
                   struct igb_tx_buffer *first,
                   u8 *hdr_len)
 {
+       u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
        struct sk_buff *skb = first->skb;
-       u32 vlan_macip_lens, type_tucmd;
-       u32 mss_l4len_idx, l4len;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4857,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring,
        if (err < 0)
                return err;
 
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_checksum_start(skb);
+
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == htons(ETH_P_IP)) {
-               struct iphdr *iph = ip_hdr(skb);
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               /* IP header will have to cancel out any data that
+                * is not a part of the outer IP header
+                */
+               ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                 csum_unfold(l4.tcp->check)));
                type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+
+               ip.v4->tot_len = 0;
                first->tx_flags |= IGB_TX_FLAGS_TSO |
                                   IGB_TX_FLAGS_CSUM |
                                   IGB_TX_FLAGS_IPV4;
-       } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                      &ipv6_hdr(skb)->daddr,
-                                                      0, IPPROTO_TCP, 0);
+       } else {
+               ip.v6->payload_len = 0;
                first->tx_flags |= IGB_TX_FLAGS_TSO |
                                   IGB_TX_FLAGS_CSUM;
        }
 
-       /* compute header lengths */
-       l4len = tcp_hdrlen(skb);
-       *hdr_len = skb_transport_offset(skb) + l4len;
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
+
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+       /* remove payload length from inner checksum */
+       paylen = skb->len - l4_offset;
+       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
        /* MSS L4LEN IDX */
-       mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
 
        /* VLAN MACLEN IPLEN */
-       vlan_macip_lens = skb_network_header_len(skb);
-       vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens = l4.hdr - ip.hdr;
+       vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
        igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
@@ -5963,11 +6018,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
 
        /* create mask for VF and other pools */
        pool_mask = E1000_VLVF_POOLSEL_MASK;
-       vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+       vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
 
        /* drop PF from pool bits */
-       pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
-                            adapter->vfs_allocated_count));
+       pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
+                            adapter->vfs_allocated_count);
 
        /* Find the vlan filter for this id */
        for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
@@ -5990,7 +6045,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
                        goto update_vlvf;
 
                vid = vlvf & E1000_VLVF_VLANID_MASK;
-               vfta_mask = 1 << (vid % 32);
+               vfta_mask = BIT(vid % 32);
 
                /* clear bit from VFTA */
                vfta = adapter->shadow_vfta[vid / 32];
@@ -6027,7 +6082,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
        return idx;
 }
 
-void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
+static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 bits, pf_id;
@@ -6041,13 +6096,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
         * entry other than the PF.
         */
        pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
-       bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+       bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
        bits &= rd32(E1000_VLVF(idx));
 
        /* Disable the filter so this falls into the default pool. */
        if (!bits) {
                if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
-                       wr32(E1000_VLVF(idx), 1 << pf_id);
+                       wr32(E1000_VLVF(idx), BIT(pf_id));
                else
                        wr32(E1000_VLVF(idx), 0);
        }
@@ -6231,9 +6286,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 
        /* enable transmit and receive for vf */
        reg = rd32(E1000_VFTE);
-       wr32(E1000_VFTE, reg | (1 << vf));
+       wr32(E1000_VFTE, reg | BIT(vf));
        reg = rd32(E1000_VFRE);
-       wr32(E1000_VFRE, reg | (1 << vf));
+       wr32(E1000_VFRE, reg | BIT(vf));
 
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
 
@@ -7927,7 +7982,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
                /* Calculate the rate factor values to set */
                rf_int = link_speed / tx_rate;
                rf_dec = (link_speed - (rf_int * tx_rate));
-               rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+               rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
                         tx_rate;
 
                bcnrc_val = E1000_RTTBCNRC_RS_ENA;
@@ -8017,11 +8072,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
        reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
        reg_val = rd32(reg_offset);
        if (setting)
-               reg_val |= ((1 << vf) |
-                           (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+               reg_val |= (BIT(vf) |
+                           BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
        else
-               reg_val &= ~((1 << vf) |
-                            (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+               reg_val &= ~(BIT(vf) |
+                            BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
        wr32(reg_offset, reg_val);
 
        adapter->vf_data[vf].spoofchk_enabled = setting;
index 22a8a29895b45d324da4311dcfd93650ccd20bbb..f097c5a8ab93d4dfed43e5f47242ac0845cdc576 100644 (file)
@@ -69,9 +69,9 @@
 
 #define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 9)
 #define IGB_PTP_TX_TIMEOUT             (HZ * 15)
-#define INCPERIOD_82576                        (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK            ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576                 (16 << IGB_82576_TSYNC_SHIFT)
+#define INCPERIOD_82576                        BIT(E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK            GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
+#define INCVALUE_82576                 (16u << IGB_82576_TSYNC_SHIFT)
 #define IGB_NBITS_82580                        40
 
 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
@@ -722,11 +722,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct skb_shared_hwtstamps shhwtstamps;
        u64 regval;
+       int adjust = 0;
 
        regval = rd32(E1000_TXSTMPL);
        regval |= (u64)rd32(E1000_TXSTMPH) << 32;
 
        igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+       /* adjust timestamp for the TX latency based on link speed */
+       if (adapter->hw.mac.type == e1000_i210) {
+               switch (adapter->link_speed) {
+               case SPEED_10:
+                       adjust = IGB_I210_TX_LATENCY_10;
+                       break;
+               case SPEED_100:
+                       adjust = IGB_I210_TX_LATENCY_100;
+                       break;
+               case SPEED_1000:
+                       adjust = IGB_I210_TX_LATENCY_1000;
+                       break;
+               }
+       }
+
+       shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+
        skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(adapter->ptp_tx_skb);
        adapter->ptp_tx_skb = NULL;
@@ -771,6 +789,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u64 regval;
+       int adjust = 0;
 
        /* If this bit is set, then the RX registers contain the time stamp. No
         * other packet will be time stamped until we read these registers, so
@@ -790,6 +809,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 
        igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 
+       /* adjust timestamp for the RX latency based on link speed */
+       if (adapter->hw.mac.type == e1000_i210) {
+               switch (adapter->link_speed) {
+               case SPEED_10:
+                       adjust = IGB_I210_RX_LATENCY_10;
+                       break;
+               case SPEED_100:
+                       adjust = IGB_I210_RX_LATENCY_100;
+                       break;
+               case SPEED_1000:
+                       adjust = IGB_I210_RX_LATENCY_1000;
+                       break;
+               }
+       }
+       skb_hwtstamps(skb)->hwtstamp =
+               ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
        /* Update the last_rx_timestamp timer in order to enable watchdog check
         * for error case of latched timestamp on a dropped packet.
         */
index ae3f28332fa0151581488f2b38b618cf89d3c456..ee1ef08d7fc49df7ada655bfd8c39e83f4624695 100644 (file)
 #define E1000_RXDCTL_QUEUE_ENABLE      0x02000000 /* Enable specific Rx Que */
 
 /* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN   (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN   BIT(11) /* Tx Desc writeback RO bit */
 
 #define E1000_VF_INIT_TIMEOUT  200 /* Number of retries to clear RSTI */
 
index b74ce53d7b523e0b8414c21bd20ff9e884ec722f..8dea1b1367ef65603592d9949fe55af0521cbcf8 100644 (file)
@@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev,
 
        memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
 
-       regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+       regs->version = (1u << 24) |
+                       (adapter->pdev->revision << 16) |
                        adapter->pdev->device;
 
        regs_buff[0] = er32(CTRL);
index f166baab8d7e59e7e8260076eeaecf21735f7514..6f4290d6dc9f3e8fec845b50597150b690210726 100644 (file)
@@ -287,8 +287,8 @@ struct igbvf_info {
 };
 
 /* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED    (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP    (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED    BIT(0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP    BIT(1)
 #define IGBVF_RX_DESC_ADV(R, i)     \
        (&((((R).desc))[i].rx_desc))
 #define IGBVF_TX_DESC_ADV(R, i)     \
index c12442252adbd0bdf76c1a1aa5394a796b37d0de..322a2d7828a56ead786558ee2b7412f72daf4fe5 100644 (file)
@@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                        ivar = ivar & 0xFFFFFF00;
                        ivar |= msix_vector | E1000_IVAR_VALID;
                }
-               adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+               adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
                array_ew32(IVAR0, index, ivar);
        }
        if (tx_queue > IGBVF_NO_QUEUE) {
@@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                        ivar = ivar & 0xFFFF00FF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                }
-               adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+               adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
                array_ew32(IVAR0, index, ivar);
        }
 }
@@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
 
        ew32(IVAR_MISC, tmp);
 
-       adapter->eims_enable_mask = (1 << (vector)) - 1;
-       adapter->eims_other = 1 << (vector - 1);
+       adapter->eims_enable_mask = GENMASK(vector - 1, 0);
+       adapter->eims_other = BIT(vector - 1);
        e1e_flush();
 }
 
@@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct igbvf_ring *rx_ring = adapter->rx_ring;
        u64 rdba;
-       u32 rdlen, rxdctl;
+       u32 rxdctl;
 
        /* disable receives */
        rxdctl = er32(RXDCTL(0));
@@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
        e1e_flush();
        msleep(10);
 
-       rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
@@ -1933,83 +1931,74 @@ static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
        buffer_info->dma = 0;
 }
 
-static int igbvf_tso(struct igbvf_adapter *adapter,
-                    struct igbvf_ring *tx_ring,
-                    struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
-                    __be16 protocol)
-{
-       struct e1000_adv_tx_context_desc *context_desc;
-       struct igbvf_buffer *buffer_info;
-       u32 info = 0, tu_cmd = 0;
-       u32 mss_l4len_idx, l4len;
-       unsigned int i;
+static int igbvf_tso(struct igbvf_ring *tx_ring,
+                    struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+       u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
 
-       *hdr_len = 0;
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       if (!skb_is_gso(skb))
+               return 0;
 
        err = skb_cow_head(skb, 0);
-       if (err < 0) {
-               dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+       if (err < 0)
                return err;
-       }
 
-       l4len = tcp_hdrlen(skb);
-       *hdr_len += l4len;
-
-       if (protocol == htons(ETH_P_IP)) {
-               struct iphdr *iph = ip_hdr(skb);
-
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                        iph->daddr, 0,
-                                                        IPPROTO_TCP,
-                                                        0);
-       } else if (skb_is_gso_v6(skb)) {
-               ipv6_hdr(skb)->payload_len = 0;
-               tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                      &ipv6_hdr(skb)->daddr,
-                                                      0, IPPROTO_TCP, 0);
-       }
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_checksum_start(skb);
 
-       i = tx_ring->next_to_use;
+       /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+       type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
-       buffer_info = &tx_ring->buffer_info[i];
-       context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
-       /* VLAN MACLEN IPLEN */
-       if (tx_flags & IGBVF_TX_FLAGS_VLAN)
-               info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
-       info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
-       *hdr_len += skb_network_offset(skb);
-       info |= (skb_transport_header(skb) - skb_network_header(skb));
-       *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
-       context_desc->vlan_macip_lens = cpu_to_le32(info);
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               /* IP header will have to cancel out any data that
+                * is not a part of the outer IP header
+                */
+               ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                 csum_unfold(l4.tcp->check)));
+               type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
 
-       /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-       tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+               ip.v4->tot_len = 0;
+       } else {
+               ip.v6->payload_len = 0;
+       }
 
-       if (protocol == htons(ETH_P_IP))
-               tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
-       tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
 
-       context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
-       /* MSS L4LEN IDX */
-       mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
-       mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+       /* remove payload length from inner checksum */
+       paylen = skb->len - l4_offset;
+       csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
-       context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-       context_desc->seqnum_seed = 0;
+       /* MSS L4LEN IDX */
+       mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
 
-       buffer_info->time_stamp = jiffies;
-       buffer_info->dma = 0;
-       i++;
-       if (i == tx_ring->count)
-               i = 0;
+       /* VLAN MACLEN IPLEN */
+       vlan_macip_lens = l4.hdr - ip.hdr;
+       vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
 
-       tx_ring->next_to_use = i;
+       igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
-       return true;
+       return 1;
 }
 
 static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
@@ -2091,7 +2080,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
 }
 
 #define IGBVF_MAX_TXD_PWR      16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR)
 
 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
@@ -2271,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
 
        first = tx_ring->next_to_use;
 
-       tso = skb_is_gso(skb) ?
-               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
+       tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
        if (unlikely(tso < 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -2615,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev,
        return 0;
 }
 
+#define IGBVF_MAX_MAC_HDR_LEN          127
+#define IGBVF_MAX_NETWORK_HDR_LEN      511
+
+static netdev_features_t
+igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
+                    netdev_features_t features)
+{
+       unsigned int network_hdr_len, mac_hdr_len;
+
+       /* Make certain the headers can be described by a context descriptor */
+       mac_hdr_len = skb_network_header(skb) - skb->data;
+       if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+       if (unlikely(network_hdr_len >  IGBVF_MAX_NETWORK_HDR_LEN))
+               return features & ~(NETIF_F_HW_CSUM |
+                                   NETIF_F_SCTP_CRC |
+                                   NETIF_F_TSO |
+                                   NETIF_F_TSO6);
+
+       /* We can only support IPV4 TSO in tunnels if we can mangle the
+        * inner IP ID field, so strip TSO if MANGLEID is not supported.
+        */
+       if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+               features &= ~NETIF_F_TSO;
+
+       return features;
+}
+
 static const struct net_device_ops igbvf_netdev_ops = {
        .ndo_open               = igbvf_open,
        .ndo_stop               = igbvf_close,
@@ -2631,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
        .ndo_poll_controller    = igbvf_netpoll,
 #endif
        .ndo_set_features       = igbvf_set_features,
-       .ndo_features_check     = passthru_features_check,
+       .ndo_features_check     = igbvf_features_check,
 };
 
 /**
@@ -2739,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                              NETIF_F_HW_CSUM |
                              NETIF_F_SCTP_CRC;
 
-       netdev->features = netdev->hw_features |
-                          NETIF_F_HW_VLAN_CTAG_TX |
-                          NETIF_F_HW_VLAN_CTAG_RX |
-                          NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                   NETIF_F_GSO_GRE_CSUM | \
+                                   NETIF_F_GSO_IPIP | \
+                                   NETIF_F_GSO_SIT | \
+                                   NETIF_F_GSO_UDP_TUNNEL | \
+                                   NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+       netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
+       netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+                              IGBVF_GSO_PARTIAL_FEATURES;
+
+       netdev->features = netdev->hw_features;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->vlan_features |= NETIF_F_SG |
-                                NETIF_F_TSO |
-                                NETIF_F_TSO6 |
-                                NETIF_F_HW_CSUM |
-                                NETIF_F_SCTP_CRC;
-
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
        netdev->mpls_features |= NETIF_F_HW_CSUM;
-       netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+       netdev->hw_enc_features |= netdev->vlan_features;
+
+       /* set this bit last since it cannot be part of vlan_features */
+       netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_TX;
 
        /*reset the controller to put the device in a known good state */
        err = hw->mac.ops.reset_hw(hw);
index a13baa90ae20298842e3aa6b5877be0f4ad7fe25..335ba66421458232cdd73e7cf1b6141c95ce48e3 100644 (file)
@@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
        msgbuf[1] = vid;
        /* Setting the 8 bit field MSG INFO to true indicates "add" */
        if (set)
-               msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+               msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT);
 
        mbx->ops.write_posted(hw, msgbuf, 2);
 
index b5c6d42daa1208dab6771d0b7578f7cdb24bde02..2664827ddecd969b3e61886c16e7059c74dbf46f 100644 (file)
@@ -68,7 +68,7 @@ config MVNETA
 
 config MVNETA_BM
        tristate
-       default y if MVNETA=y && MVNETA_BM_ENABLE
+       default y if MVNETA=y && MVNETA_BM_ENABLE!=n
        default MVNETA_BM_ENABLE
        select HWBM
        help
index 89d0d835352ecf82a91b342a64d701c60da79d20..54d5154ac0f8ec6033e0683946a429d965682f40 100644 (file)
@@ -286,12 +286,12 @@ static int pxa168_eth_stop(struct net_device *dev);
 
 static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
 {
-       return readl(pep->base + offset);
+       return readl_relaxed(pep->base + offset);
 }
 
 static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
 {
-       writel(data, pep->base + offset);
+       writel_relaxed(data, pep->base + offset);
 }
 
 static void abort_dma(struct pxa168_eth_private *pep)
@@ -342,9 +342,9 @@ static void rxq_refill(struct net_device *dev)
                pep->rx_skb[used_rx_desc] = skb;
 
                /* Return the descriptor to DMA ownership */
-               wmb();
+               dma_wmb();
                p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
-               wmb();
+               dma_wmb();
 
                /* Move the used descriptor pointer to the next descriptor */
                pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
@@ -794,7 +794,7 @@ static int rxq_process(struct net_device *dev, int budget)
                rx_used_desc = pep->rx_used_desc_q;
                rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
                cmd_sts = rx_desc->cmd_sts;
-               rmb();
+               dma_rmb();
                if (cmd_sts & (BUF_OWNED_BY_DMA))
                        break;
                skb = pep->rx_skb[rx_curr_desc];
@@ -1287,7 +1287,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        skb_tx_timestamp(skb);
 
-       wmb();
+       dma_wmb();
        desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
                        TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
        wmb();
index b531d4f3c00b5a2b397d97f11c592b4119e36289..9ea7b583096a39140b87a28783191fc4d6017c7f 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE)         += mlx5_core.o
 
 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
                health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
-               mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+               mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
                en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
index eb926e1ee71c259291850ca906dd1bdb3c79a753..dcd2df6518de32a8e412b10a548c0973caa15568 100644 (file)
@@ -294,6 +294,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -395,6 +396,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+       case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
@@ -406,178 +409,142 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 
 const char *mlx5_command_str(int command)
 {
-       switch (command) {
-       case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
-               return "QUERY_HCA_VPORT_CONTEXT";
-
-       case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
-               return "MODIFY_HCA_VPORT_CONTEXT";
-
-       case MLX5_CMD_OP_QUERY_HCA_CAP:
-               return "QUERY_HCA_CAP";
-
-       case MLX5_CMD_OP_SET_HCA_CAP:
-               return "SET_HCA_CAP";
-
-       case MLX5_CMD_OP_QUERY_ADAPTER:
-               return "QUERY_ADAPTER";
-
-       case MLX5_CMD_OP_INIT_HCA:
-               return "INIT_HCA";
-
-       case MLX5_CMD_OP_TEARDOWN_HCA:
-               return "TEARDOWN_HCA";
-
-       case MLX5_CMD_OP_ENABLE_HCA:
-               return "MLX5_CMD_OP_ENABLE_HCA";
-
-       case MLX5_CMD_OP_DISABLE_HCA:
-               return "MLX5_CMD_OP_DISABLE_HCA";
-
-       case MLX5_CMD_OP_QUERY_PAGES:
-               return "QUERY_PAGES";
-
-       case MLX5_CMD_OP_MANAGE_PAGES:
-               return "MANAGE_PAGES";
-
-       case MLX5_CMD_OP_CREATE_MKEY:
-               return "CREATE_MKEY";
-
-       case MLX5_CMD_OP_QUERY_MKEY:
-               return "QUERY_MKEY";
-
-       case MLX5_CMD_OP_DESTROY_MKEY:
-               return "DESTROY_MKEY";
-
-       case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
-               return "QUERY_SPECIAL_CONTEXTS";
-
-       case MLX5_CMD_OP_CREATE_EQ:
-               return "CREATE_EQ";
-
-       case MLX5_CMD_OP_DESTROY_EQ:
-               return "DESTROY_EQ";
-
-       case MLX5_CMD_OP_QUERY_EQ:
-               return "QUERY_EQ";
-
-       case MLX5_CMD_OP_CREATE_CQ:
-               return "CREATE_CQ";
-
-       case MLX5_CMD_OP_DESTROY_CQ:
-               return "DESTROY_CQ";
-
-       case MLX5_CMD_OP_QUERY_CQ:
-               return "QUERY_CQ";
-
-       case MLX5_CMD_OP_MODIFY_CQ:
-               return "MODIFY_CQ";
-
-       case MLX5_CMD_OP_CREATE_QP:
-               return "CREATE_QP";
-
-       case MLX5_CMD_OP_DESTROY_QP:
-               return "DESTROY_QP";
-
-       case MLX5_CMD_OP_RST2INIT_QP:
-               return "RST2INIT_QP";
-
-       case MLX5_CMD_OP_INIT2RTR_QP:
-               return "INIT2RTR_QP";
-
-       case MLX5_CMD_OP_RTR2RTS_QP:
-               return "RTR2RTS_QP";
-
-       case MLX5_CMD_OP_RTS2RTS_QP:
-               return "RTS2RTS_QP";
-
-       case MLX5_CMD_OP_SQERR2RTS_QP:
-               return "SQERR2RTS_QP";
-
-       case MLX5_CMD_OP_2ERR_QP:
-               return "2ERR_QP";
-
-       case MLX5_CMD_OP_2RST_QP:
-               return "2RST_QP";
-
-       case MLX5_CMD_OP_QUERY_QP:
-               return "QUERY_QP";
-
-       case MLX5_CMD_OP_MAD_IFC:
-               return "MAD_IFC";
-
-       case MLX5_CMD_OP_INIT2INIT_QP:
-               return "INIT2INIT_QP";
-
-       case MLX5_CMD_OP_CREATE_PSV:
-               return "CREATE_PSV";
-
-       case MLX5_CMD_OP_DESTROY_PSV:
-               return "DESTROY_PSV";
-
-       case MLX5_CMD_OP_CREATE_SRQ:
-               return "CREATE_SRQ";
-
-       case MLX5_CMD_OP_DESTROY_SRQ:
-               return "DESTROY_SRQ";
-
-       case MLX5_CMD_OP_QUERY_SRQ:
-               return "QUERY_SRQ";
-
-       case MLX5_CMD_OP_ARM_RQ:
-               return "ARM_RQ";
-
-       case MLX5_CMD_OP_CREATE_XRC_SRQ:
-               return "CREATE_XRC_SRQ";
-
-       case MLX5_CMD_OP_DESTROY_XRC_SRQ:
-               return "DESTROY_XRC_SRQ";
-
-       case MLX5_CMD_OP_QUERY_XRC_SRQ:
-               return "QUERY_XRC_SRQ";
-
-       case MLX5_CMD_OP_ARM_XRC_SRQ:
-               return "ARM_XRC_SRQ";
-
-       case MLX5_CMD_OP_ALLOC_PD:
-               return "ALLOC_PD";
-
-       case MLX5_CMD_OP_DEALLOC_PD:
-               return "DEALLOC_PD";
-
-       case MLX5_CMD_OP_ALLOC_UAR:
-               return "ALLOC_UAR";
-
-       case MLX5_CMD_OP_DEALLOC_UAR:
-               return "DEALLOC_UAR";
-
-       case MLX5_CMD_OP_ATTACH_TO_MCG:
-               return "ATTACH_TO_MCG";
-
-       case MLX5_CMD_OP_DETTACH_FROM_MCG:
-               return "DETTACH_FROM_MCG";
-
-       case MLX5_CMD_OP_ALLOC_XRCD:
-               return "ALLOC_XRCD";
-
-       case MLX5_CMD_OP_DEALLOC_XRCD:
-               return "DEALLOC_XRCD";
-
-       case MLX5_CMD_OP_ACCESS_REG:
-               return "MLX5_CMD_OP_ACCESS_REG";
-
-       case MLX5_CMD_OP_SET_WOL_ROL:
-               return "SET_WOL_ROL";
-
-       case MLX5_CMD_OP_QUERY_WOL_ROL:
-               return "QUERY_WOL_ROL";
-
-       case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
-               return "ADD_VXLAN_UDP_DPORT";
-
-       case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
-               return "DELETE_VXLAN_UDP_DPORT";
+#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
 
+       switch (command) {
+       MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
+       MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
+       MLX5_COMMAND_STR_CASE(INIT_HCA);
+       MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
+       MLX5_COMMAND_STR_CASE(ENABLE_HCA);
+       MLX5_COMMAND_STR_CASE(DISABLE_HCA);
+       MLX5_COMMAND_STR_CASE(QUERY_PAGES);
+       MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
+       MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
+       MLX5_COMMAND_STR_CASE(QUERY_ISSI);
+       MLX5_COMMAND_STR_CASE(SET_ISSI);
+       MLX5_COMMAND_STR_CASE(CREATE_MKEY);
+       MLX5_COMMAND_STR_CASE(QUERY_MKEY);
+       MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
+       MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
+       MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
+       MLX5_COMMAND_STR_CASE(CREATE_EQ);
+       MLX5_COMMAND_STR_CASE(DESTROY_EQ);
+       MLX5_COMMAND_STR_CASE(QUERY_EQ);
+       MLX5_COMMAND_STR_CASE(GEN_EQE);
+       MLX5_COMMAND_STR_CASE(CREATE_CQ);
+       MLX5_COMMAND_STR_CASE(DESTROY_CQ);
+       MLX5_COMMAND_STR_CASE(QUERY_CQ);
+       MLX5_COMMAND_STR_CASE(MODIFY_CQ);
+       MLX5_COMMAND_STR_CASE(CREATE_QP);
+       MLX5_COMMAND_STR_CASE(DESTROY_QP);
+       MLX5_COMMAND_STR_CASE(RST2INIT_QP);
+       MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
+       MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
+       MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
+       MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
+       MLX5_COMMAND_STR_CASE(2ERR_QP);
+       MLX5_COMMAND_STR_CASE(2RST_QP);
+       MLX5_COMMAND_STR_CASE(QUERY_QP);
+       MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
+       MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
+       MLX5_COMMAND_STR_CASE(CREATE_PSV);
+       MLX5_COMMAND_STR_CASE(DESTROY_PSV);
+       MLX5_COMMAND_STR_CASE(CREATE_SRQ);
+       MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
+       MLX5_COMMAND_STR_CASE(QUERY_SRQ);
+       MLX5_COMMAND_STR_CASE(ARM_RQ);
+       MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
+       MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
+       MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
+       MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
+       MLX5_COMMAND_STR_CASE(CREATE_DCT);
+       MLX5_COMMAND_STR_CASE(DESTROY_DCT);
+       MLX5_COMMAND_STR_CASE(DRAIN_DCT);
+       MLX5_COMMAND_STR_CASE(QUERY_DCT);
+       MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
+       MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
+       MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
+       MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
+       MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
+       MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
+       MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
+       MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
+       MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
+       MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
+       MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
+       MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
+       MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+       MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
+       MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+       MLX5_COMMAND_STR_CASE(ALLOC_PD);
+       MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+       MLX5_COMMAND_STR_CASE(ALLOC_UAR);
+       MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
+       MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
+       MLX5_COMMAND_STR_CASE(ACCESS_REG);
+       MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
+       MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+       MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
+       MLX5_COMMAND_STR_CASE(MAD_IFC);
+       MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
+       MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
+       MLX5_COMMAND_STR_CASE(NOP);
+       MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
+       MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
+       MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
+       MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
+       MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
+       MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
+       MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
+       MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
+       MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
+       MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
+       MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
+       MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
+       MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
+       MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
+       MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
+       MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+       MLX5_COMMAND_STR_CASE(CREATE_TIR);
+       MLX5_COMMAND_STR_CASE(MODIFY_TIR);
+       MLX5_COMMAND_STR_CASE(DESTROY_TIR);
+       MLX5_COMMAND_STR_CASE(QUERY_TIR);
+       MLX5_COMMAND_STR_CASE(CREATE_SQ);
+       MLX5_COMMAND_STR_CASE(MODIFY_SQ);
+       MLX5_COMMAND_STR_CASE(DESTROY_SQ);
+       MLX5_COMMAND_STR_CASE(QUERY_SQ);
+       MLX5_COMMAND_STR_CASE(CREATE_RQ);
+       MLX5_COMMAND_STR_CASE(MODIFY_RQ);
+       MLX5_COMMAND_STR_CASE(DESTROY_RQ);
+       MLX5_COMMAND_STR_CASE(QUERY_RQ);
+       MLX5_COMMAND_STR_CASE(CREATE_RMP);
+       MLX5_COMMAND_STR_CASE(MODIFY_RMP);
+       MLX5_COMMAND_STR_CASE(DESTROY_RMP);
+       MLX5_COMMAND_STR_CASE(QUERY_RMP);
+       MLX5_COMMAND_STR_CASE(CREATE_TIS);
+       MLX5_COMMAND_STR_CASE(MODIFY_TIS);
+       MLX5_COMMAND_STR_CASE(DESTROY_TIS);
+       MLX5_COMMAND_STR_CASE(QUERY_TIS);
+       MLX5_COMMAND_STR_CASE(CREATE_RQT);
+       MLX5_COMMAND_STR_CASE(MODIFY_RQT);
+       MLX5_COMMAND_STR_CASE(DESTROY_RQT);
+       MLX5_COMMAND_STR_CASE(QUERY_RQT);
+       MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
+       MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
+       MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
+       MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
+       MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
+       MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
+       MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
+       MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
+       MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
+       MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
+       MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+       MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+       MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
        default: return "unknown command opcode";
        }
 }
index 08040702824d9885311921d8c26e95e410e80919..fd4392999eeefcd9a2f77b05583b04d04a10d663 100644 (file)
@@ -2154,6 +2154,8 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
                        return mlx5e_configure_flower(priv, proto, tc->cls_flower);
                case TC_CLSFLOWER_DESTROY:
                        return mlx5e_delete_flower(priv, tc->cls_flower);
+               case TC_CLSFLOWER_STATS:
+                       return mlx5e_stats_flower(priv, tc->cls_flower);
                }
        default:
                return -EOPNOTSUPP;
index ef017c0decdc39b3de23bc9a260f17930f1e64cc..704c3d30493e8218a13a1a1c0ebb4eac7885f9c0 100644 (file)
@@ -53,13 +53,24 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
                                                u32 *match_c, u32 *match_v,
                                                u32 action, u32 flow_tag)
 {
-       struct mlx5_flow_destination dest = {
-               .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
-               {.ft = priv->fs.vlan.ft.t},
-       };
+       struct mlx5_core_dev *dev = priv->mdev;
+       struct mlx5_flow_destination dest = { 0 };
+       struct mlx5_fc *counter = NULL;
        struct mlx5_flow_rule *rule;
        bool table_created = false;
 
+       if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+               dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+               dest.ft = priv->fs.vlan.ft.t;
+       } else {
+               counter = mlx5_fc_create(dev, true);
+               if (IS_ERR(counter))
+                       return ERR_CAST(counter);
+
+               dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+               dest.counter = counter;
+       }
+
        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
@@ -70,7 +81,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
-                       return ERR_CAST(priv->fs.tc.t);
+                       rule = ERR_CAST(priv->fs.tc.t);
+                       goto err_create_ft;
                }
 
                table_created = true;
@@ -79,12 +91,20 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
        rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
                                  match_c, match_v,
                                  action, flow_tag,
-                                 action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+                                 &dest);
+
+       if (IS_ERR(rule))
+               goto err_add_rule;
+
+       return rule;
 
-       if (IS_ERR(rule) && table_created) {
+err_add_rule:
+       if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
+err_create_ft:
+       mlx5_fc_destroy(dev, counter);
 
        return rule;
 }
@@ -92,8 +112,14 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule)
 {
+       struct mlx5_fc *counter = NULL;
+
+       counter = mlx5_flow_rule_counter(rule);
+
        mlx5_del_flow_rule(rule);
 
+       mlx5_fc_destroy(priv->mdev, counter);
+
        if (!mlx5e_tc_num_filters(priv)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
@@ -286,6 +312,9 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+                       if (MLX5_CAP_FLOWTABLE(priv->mdev,
+                                              flow_table_properties_nic_receive.flow_counter))
+                               *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }
 
@@ -394,6 +423,34 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
        return 0;
 }
 
+/* Report hardware flow-counter statistics for an offloaded flower filter.
+ * The flow is looked up by the filter cookie; returns -EINVAL when no
+ * offloaded flow matches, and 0 otherwise — including when the rule has
+ * no counter attached (nothing to report).
+ */
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+                      struct tc_cls_flower_offload *f)
+{
+       struct mlx5e_tc_table *tc = &priv->fs.tc;
+       struct mlx5e_tc_flow *flow;
+       struct tc_action *a;
+       struct mlx5_fc *counter;
+       u64 bytes;
+       u64 packets;
+       u64 lastuse;
+
+       flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+                                     tc->ht_params);
+       if (!flow)
+               return -EINVAL;
+
+       counter = mlx5_flow_rule_counter(flow->rule);
+       if (!counter)
+               return 0;
+
+       /* Reads the driver-side cached totals (refreshed out of band by the
+        * fs_counters code), so this does not issue a firmware command.
+        */
+       mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+       /* Propagate the totals into every action of the filter's exts. */
+       tc_for_each_action(a, f->exts)
+               tcf_action_stats_update(a, bytes, packets, lastuse);
+
+       return 0;
+}
+
+
 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
index a4f17b974d622aea98c1b14ab62ab999fc8edcad..34bf903fc8863b14367afdb674d96ee5ec5468bc 100644 (file)
@@ -43,6 +43,9 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f);
 
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+                      struct tc_cls_flower_offload *f);
+
 static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
 {
        return atomic_read(&priv->fs.tc.ht.nelems);
index 9797768891ee3cae70d8a77390ebcc1fee4498f9..a5bb6b6952421347b1f91a9b49a60dfc18e4f7b6 100644 (file)
@@ -241,17 +241,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
        MLX5_SET(flow_context, in_flow_context, group_id, group_id);
        MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
        MLX5_SET(flow_context, in_flow_context, action, fte->action);
-       MLX5_SET(flow_context, in_flow_context, destination_list_size,
-                fte->dests_size);
        in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
                                      match_value);
        memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
 
+       in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
        if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-               in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+               int list_size = 0;
+
                list_for_each_entry(dst, &fte->node.children, node.list) {
                        unsigned int id;
 
+                       if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
                        MLX5_SET(dest_format_struct, in_dests, destination_type,
                                 dst->dest_attr.type);
                        if (dst->dest_attr.type ==
@@ -262,8 +265,31 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                        }
                        MLX5_SET(dest_format_struct, in_dests, destination_id, id);
                        in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+                       list_size++;
+               }
+
+               MLX5_SET(flow_context, in_flow_context, destination_list_size,
+                        list_size);
+       }
+
+       if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               int list_size = 0;
+
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       if (dst->dest_attr.type !=
+                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+                                dst->dest_attr.counter->id);
+                       in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+                       list_size++;
                }
+
+               MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+                        list_size);
        }
+
        memset(out, 0, sizeof(out));
        err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
                                         sizeof(out));
@@ -283,18 +309,16 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+/* Atomically modify an existing flow table entry.  modify_mask is a bitmask
+ * of MLX5_SET_FTE_MODIFY_ENABLE_MASK_* bits selecting which flow-context
+ * fields (destination list, flow counters, ...) the device should apply;
+ * it is now supplied by the caller instead of being hard-coded to the
+ * destination list.  Fails with -ENOTSUPP when the device does not
+ * advertise atomic flow-modify (flow_modify_en capability).
+ */
 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
                        struct mlx5_flow_table *ft,
                        unsigned group_id,
+                       int modify_mask,
                        struct fs_fte *fte)
 {
        int opmod;
-       int modify_mask;
        int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
                                                flow_table_properties_nic_receive.
                                                flow_modify_en);
        if (!atomic_mod_cap)
                return -ENOTSUPP;
        opmod = 1;
-       modify_mask = 1 <<
-               MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
 
        return  mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
 }
@@ -323,3 +347,69 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
 
        return err;
 }
+
+/* Allocate a hardware flow counter (ALLOC_FLOW_COUNTER command).
+ * On success, stores the device-assigned counter id in *id and returns 0;
+ * otherwise returns the error from the command execution/status check.
+ */
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
+{
+       u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
+       u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(alloc_flow_counter_in, in, opcode,
+                MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+
+       err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                        sizeof(out));
+       if (err)
+               return err;
+
+       *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+
+       return 0;
+}
+
+/* Release a hardware flow counter previously obtained with
+ * mlx5_cmd_fc_alloc() (DEALLOC_FLOW_COUNTER command).
+ * Returns 0 on success or the command-status error.
+ */
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
+       u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(dealloc_flow_counter_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+       MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
+
+/* Query a hardware flow counter's absolute totals (QUERY_FLOW_COUNTER).
+ * On success, writes the packet and octet counts to *packets / *bytes
+ * and returns 0; otherwise returns the command-status error.
+ */
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+                     u64 *packets, u64 *bytes)
+{
+       /* NOTE(review): out[] is a u32 array whose element count is a BYTE
+        * count (MLX5_ST_SZ_BYTES), making the buffer 4x larger than needed.
+        * Harmless (stack over-allocation only), but confirm intent — the
+        * sibling commands size with MLX5_ST_SZ_DW.
+        */
+       u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+               MLX5_ST_SZ_BYTES(traffic_counter)];
+       u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+       void *stats;
+       int err = 0;
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(query_flow_counter_in, in, opcode,
+                MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+       MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+       MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+
+       err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       /* Extract the embedded traffic_counter struct from the response. */
+       stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+       *packets = MLX5_GET64(traffic_counter, stats, packets);
+       *bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+       return 0;
+}
index c97b4a03eeedbdde80958f06b895a0380c54b0b7..fc4f7b83fe0ae4cb516775d15dbdd53f40a1dc9e 100644 (file)
@@ -62,6 +62,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
                        struct mlx5_flow_table *ft,
                        unsigned group_id,
+                       int modify_mask,
                        struct fs_fte *fte);
 
 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
@@ -70,4 +71,9 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
 
 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
                            struct mlx5_flow_table *ft);
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+                     u64 *packets, u64 *bytes);
 #endif
index 659a6980cda28a410f59540bb8d69f9eceb1847f..8b5f0b2c0d5cd5d8f62cf6560ee9fa498800ac03 100644 (file)
@@ -344,6 +344,7 @@ static void del_rule(struct fs_node *node)
        struct mlx5_flow_group *fg;
        struct fs_fte *fte;
        u32     *match_value;
+       int modify_mask;
        struct mlx5_core_dev *dev = get_dev(node);
        int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
        int err;
@@ -367,8 +368,11 @@ static void del_rule(struct fs_node *node)
        }
        if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
            --fte->dests_size) {
+               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
                err = mlx5_cmd_update_fte(dev, ft,
-                                         fg->id, fte);
+                                         fg->id,
+                                         modify_mask,
+                                         fte);
                if (err)
                        pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
                                __func__, fg->id, fte->index);
@@ -615,6 +619,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
        struct mlx5_flow_table *ft;
        struct mlx5_flow_group *fg;
        struct fs_fte *fte;
+       int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
        int err = 0;
 
        fs_get_obj(fte, rule->node.parent);
@@ -626,7 +631,9 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 
        memcpy(&rule->dest_attr, dest, sizeof(*dest));
        err = mlx5_cmd_update_fte(get_dev(&ft->node),
-                                 ft, fg->id, fte);
+                                 ft, fg->id,
+                                 modify_mask,
+                                 fte);
        unlock_ref_node(&fte->node);
 
        return err;
@@ -877,6 +884,7 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
 {
        struct mlx5_flow_table *ft;
        struct mlx5_flow_rule *rule;
+       int modify_mask = 0;
        int err;
 
        rule = alloc_rule(dest);
@@ -892,14 +900,20 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
                list_add(&rule->node.list, &fte->node.children);
        else
                list_add_tail(&rule->node.list, &fte->node.children);
-       if (dest)
+       if (dest) {
                fte->dests_size++;
+
+               modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
+                       BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
+                       BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+       }
+
        if (fte->dests_size == 1 || !dest)
                err = mlx5_cmd_create_fte(get_dev(&ft->node),
                                          ft, fg->id, fte);
        else
                err = mlx5_cmd_update_fte(get_dev(&ft->node),
-                                         ft, fg->id, fte);
+                                         ft, fg->id, modify_mask, fte);
        if (err)
                goto free_rule;
 
@@ -1092,10 +1106,40 @@ unlock_fg:
        return rule;
 }
 
+/* Return the flow counter attached to the rule's flow table entry, or
+ * NULL when none of the FTE's destinations is of type COUNTER.  Walks
+ * the destination list of the FTE that owns @rule.
+ */
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
+{
+       struct mlx5_flow_rule *dst;
+       struct fs_fte *fte;
+
+       fs_get_obj(fte, rule->node.parent);
+
+       fs_for_each_dst(dst, fte) {
+               if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                       return dst->dest_attr.counter;
+       }
+
+       return NULL;
+}
+
+/* Validate the pairing of a COUNTER destination with the rule's action:
+ *  - without ACTION_COUNT, no counter may be attached;
+ *  - with ACTION_COUNT, a counter is mandatory and the only action
+ *    combination the hardware supports is exactly DROP | COUNT.
+ */
+static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
+{
+       if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
+               return !counter;
+
+       if (!counter)
+               return false;
+
+       /* Hardware support counter for a drop action only */
+       return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+}
+
 static bool dest_is_valid(struct mlx5_flow_destination *dest,
                          u32 action,
                          struct mlx5_flow_table *ft)
 {
+       if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+               return counter_is_valid(dest->counter, action);
+
        if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
                return true;
 
@@ -1727,6 +1771,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
        cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
        cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
        cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
+       mlx5_cleanup_fc_stats(dev);
 }
 
 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
@@ -1783,10 +1828,14 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        int err = 0;
 
+       err = mlx5_init_fc_stats(dev);
+       if (err)
+               return err;
+
        if (MLX5_CAP_GEN(dev, nic_flow_table)) {
                err = init_root_ns(dev);
                if (err)
-                       return err;
+                       goto err;
        }
        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
                err = init_fdb_root_ns(dev);
index 8e76cc505f5a4b06c313d820a53582af44800ea6..aa41a7314691977e5457bdb2f159334d1dcf76aa 100644 (file)
@@ -96,6 +96,28 @@ struct mlx5_flow_table {
        struct list_head                fwd_rules;
 };
 
+/* Driver-side snapshot of a counter's totals as last read from hardware. */
+struct mlx5_fc_cache {
+       u64 packets;
+       u64 bytes;
+       u64 lastuse;
+};
+
+/* A hardware flow counter as tracked by the flow-steering core. */
+struct mlx5_fc {
+       struct list_head list;
+
+       /* last{packets,bytes} members are used when calculating the delta since
+        * last reading
+        */
+       u64 lastpackets;
+       u64 lastbytes;
+
+       u16 id;       /* device-assigned counter id (see mlx5_cmd_fc_alloc) */
+       bool deleted;
+       bool aging;
+
+       /* cacheline-aligned — presumably to keep the periodically-updated
+        * cache off the cacheline of the bookkeeping fields (avoid false
+        * sharing between updater and readers); TODO confirm.
+        */
+       struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
 /* Type of children is mlx5_flow_rule */
 struct fs_fte {
        struct fs_node                  node;
@@ -105,6 +127,7 @@ struct fs_fte {
        u32                             index;
        u32                             action;
        enum fs_fte_status              status;
+       struct mlx5_fc                  *counter;
 };
 
 /* Type of children is mlx5_flow_table/namespace */
@@ -146,6 +169,9 @@ struct mlx5_flow_root_namespace {
        struct mutex                    chain_lock;
 };
 
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+
 int mlx5_init_fs(struct mlx5_core_dev *dev);
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
new file mode 100644 (file)
index 0000000..164dc37
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+
+/* locking scheme:
+ *
+ * It is the responsibility of the user to prevent concurrent calls or bad
+ * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
+ * to struct mlx5_fc.
+ * e.g. en_tc.c is protected by RTNL lock of its caller, and will never call a
+ * dump (access to struct mlx5_fc) after a counter is destroyed.
+ *
+ * access to counter list:
+ * - create (user context)
+ *   - mlx5_fc_create() only adds to an addlist to be used by
+ *     mlx5_fc_stats_query_work(). addlist is protected by a spinlock.
+ *   - spawn thread to do the actual add to the counters list
+ *
+ * - destroy (user context)
+ *   - mark a counter as deleted
+ *   - spawn thread to do the actual del
+ *
+ * - dump (user context)
+ *   user should not call dump after destroy
+ *
+ * - query (single thread workqueue context)
+ *   destroy/dump - no conflict (see destroy)
+ *   query/dump - packets and bytes might be inconsistent (since update is not
+ *                atomic)
+ *   query/create - no conflict (see create)
+ *   since every create/destroy spawn the work, only after necessary time has
+ *   elapsed, the thread will actually query the hardware.
+ */
+
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+       struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
+                                                priv.fc_stats.work.work);
+       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       unsigned long now = jiffies;
+       struct mlx5_fc *counter;
+       struct mlx5_fc *tmp;
+       int err = 0;
+
+       spin_lock(&fc_stats->addlist_lock);
+
+       list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+       if (!list_empty(&fc_stats->list))
+               queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+
+       spin_unlock(&fc_stats->addlist_lock);
+
+       list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+               struct mlx5_fc_cache *c = &counter->cache;
+               u64 packets;
+               u64 bytes;
+
+               if (counter->deleted) {
+                       list_del(&counter->list);
+
+                       mlx5_cmd_fc_free(dev, counter->id);
+
+                       kfree(counter);
+                       continue;
+               }
+
+               if (time_before(now, fc_stats->next_query))
+                       continue;
+
+               err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
+               if (err) {
+                       pr_err("Error querying stats for counter id %d\n",
+                              counter->id);
+                       continue;
+               }
+
+               if (packets == c->packets)
+                       continue;
+
+               c->lastuse = jiffies;
+               c->packets = packets;
+               c->bytes   = bytes;
+       }
+
+       if (time_after_eq(now, fc_stats->next_query))
+               fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       struct mlx5_fc *counter;
+       int err;
+
+       counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+       if (!counter)
+               return ERR_PTR(-ENOMEM);
+
+       err = mlx5_cmd_fc_alloc(dev, &counter->id);
+       if (err)
+               goto err_out;
+
+       if (aging) {
+               counter->aging = true;
+
+               spin_lock(&fc_stats->addlist_lock);
+               list_add(&counter->list, &fc_stats->addlist);
+               spin_unlock(&fc_stats->addlist_lock);
+
+               mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+       }
+
+       return counter;
+
+err_out:
+       kfree(counter);
+
+       return ERR_PTR(err);
+}
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+       if (!counter)
+               return;
+
+       if (counter->aging) {
+               counter->deleted = true;
+               mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+               return;
+       }
+
+       mlx5_cmd_fc_free(dev, counter->id);
+       kfree(counter);
+}
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
+{
+       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+       INIT_LIST_HEAD(&fc_stats->list);
+       INIT_LIST_HEAD(&fc_stats->addlist);
+       spin_lock_init(&fc_stats->addlist_lock);
+
+       fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
+       if (!fc_stats->wq)
+               return -ENOMEM;
+
+       INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+
+       return 0;
+}
+
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+{
+       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       struct mlx5_fc *counter;
+       struct mlx5_fc *tmp;
+
+       cancel_delayed_work_sync(&dev->priv.fc_stats.work);
+       destroy_workqueue(dev->priv.fc_stats.wq);
+       dev->priv.fc_stats.wq = NULL;
+
+       list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+       list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+               list_del(&counter->list);
+
+               mlx5_cmd_fc_free(dev, counter->id);
+
+               kfree(counter);
+       }
+}
+
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+                         u64 *bytes, u64 *packets, u64 *lastuse)
+{
+       struct mlx5_fc_cache c;
+
+       c = counter->cache;
+
+       *bytes = c.bytes - counter->lastbytes;
+       *packets = c.packets - counter->lastpackets;
+       *lastuse = c.lastuse;
+
+       counter->lastbytes = c.bytes;
+       counter->lastpackets = c.packets;
+}
index cda9e604a95f68d61227808779a93abf1c400342..0844b7c7576709c8a271142fd85f41b8007fc108 100644 (file)
@@ -1417,6 +1417,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
        struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
        struct pci_dev *pdev = adapter->pdev;
        bool extended = false;
+       int ret;
 
        prev_version = adapter->fw_version;
        current_version = qlcnic_83xx_get_fw_version(adapter);
@@ -1427,8 +1428,11 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
                if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
                        extended = !qlcnic_83xx_extend_md_capab(adapter);
 
-               if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-                       dev_info(&pdev->dev, "Supports FW dump capability\n");
+               ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+               if (ret)
+                       return;
+
+               dev_info(&pdev->dev, "Supports FW dump capability\n");
 
                /* Once we have minidump template with extended iSCSI dump
                 * capability, update the minidump capture mask to 0x1f as
index 34066e0649f5c673b6d623c2d2056340a5c39801..bcebafd780234f96c433293347cf052acc9270f2 100644 (file)
@@ -1667,6 +1667,8 @@ static int ravb_close(struct net_device *ndev)
                priv->phydev = NULL;
        }
 
+       if (priv->chip_id == RCAR_GEN3)
+               free_irq(priv->emac_irq, ndev);
        free_irq(ndev->irq, ndev);
 
        napi_disable(&priv->napi[RAVB_NC]);
index b868e458d0b5dc7929ac44f38ae5d140b9727d66..93a2d3c07303e6eee13f8e3f083b213a4b753609 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
+#include <linux/of_net.h>
 #include <linux/spi/spi.h>
 
 #include "w5100.h"
@@ -414,6 +415,7 @@ static int w5100_spi_probe(struct spi_device *spi)
        const struct spi_device_id *id = spi_get_device_id(spi);
        const struct w5100_ops *ops;
        int priv_size;
+       const void *mac = of_get_mac_address(spi->dev.of_node);
 
        switch (id->driver_data) {
        case W5100:
@@ -432,7 +434,7 @@ static int w5100_spi_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
-       return w5100_probe(&spi->dev, ops, priv_size, NULL, spi->irq, -EINVAL);
+       return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
 }
 
 static int w5100_spi_remove(struct spi_device *spi)
index ec1889ce38a384f1311286c5835f61bc1f26bf40..4f6255cf62ce9f7e29d59dec6e01927d2a51721b 100644 (file)
@@ -63,8 +63,9 @@ MODULE_LICENSE("GPL");
 #define S0_REGS(priv)          ((priv)->s0_regs)
 
 #define W5100_S0_MR(priv)      (S0_REGS(priv) + W5100_Sn_MR)
-#define   S0_MR_MACRAW           0x04 /* MAC RAW mode (promiscuous) */
-#define   S0_MR_MACRAW_MF        0x44 /* MAC RAW mode (filtered) */
+#define   S0_MR_MACRAW           0x04 /* MAC RAW mode */
+#define   S0_MR_MF               0x40 /* MAC Filter for W5100 and W5200 */
+#define   W5500_S0_MR_MF         0x80 /* MAC Filter for W5500 */
 #define W5100_S0_CR(priv)      (S0_REGS(priv) + W5100_Sn_CR)
 #define   S0_CR_OPEN             0x01 /* OPEN command */
 #define   S0_CR_CLOSE            0x10 /* CLOSE command */
@@ -173,11 +174,6 @@ struct w5100_priv {
        struct work_struct restart_work;
 };
 
-static inline bool is_w5200(struct w5100_priv *priv)
-{
-       return priv->ops->chip_id == W5200;
-}
-
 /************************************************************************
  *
  *  Lowlevel I/O functions
@@ -707,8 +703,16 @@ static int w5100_hw_reset(struct w5100_priv *priv)
 
 static void w5100_hw_start(struct w5100_priv *priv)
 {
-       w5100_write(priv, W5100_S0_MR(priv), priv->promisc ?
-                         S0_MR_MACRAW : S0_MR_MACRAW_MF);
+       u8 mode = S0_MR_MACRAW;
+
+       if (!priv->promisc) {
+               if (priv->ops->chip_id == W5500)
+                       mode |= W5500_S0_MR_MF;
+               else
+                       mode |= S0_MR_MF;
+       }
+
+       w5100_write(priv, W5100_S0_MR(priv), mode);
        w5100_command(priv, S0_CR_OPEN);
        w5100_enable_intr(priv);
 }
@@ -1048,7 +1052,7 @@ static const struct net_device_ops w5100_netdev_ops = {
 static int w5100_mmio_probe(struct platform_device *pdev)
 {
        struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
-       u8 *mac_addr = NULL;
+       const void *mac_addr = NULL;
        struct resource *mem;
        const struct w5100_ops *ops;
        int irq;
@@ -1083,7 +1087,8 @@ void *w5100_ops_priv(const struct net_device *ndev)
 EXPORT_SYMBOL_GPL(w5100_ops_priv);
 
 int w5100_probe(struct device *dev, const struct w5100_ops *ops,
-               int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio)
+               int sizeof_ops_priv, const void *mac_addr, int irq,
+               int link_gpio)
 {
        struct w5100_priv *priv;
        struct net_device *ndev;
@@ -1138,7 +1143,6 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
 
        ndev->netdev_ops = &w5100_netdev_ops;
        ndev->ethtool_ops = &w5100_ethtool_ops;
-       ndev->watchdog_timeo = HZ;
        netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
 
        /* This chip doesn't support VLAN packets with normal MTU,
index f8a16fad807b4884c76c9c893282e763287b4cad..17983a3b8d6c6ad4339ed654249d436ce7c08669 100644 (file)
@@ -30,7 +30,8 @@ struct w5100_ops {
 void *w5100_ops_priv(const struct net_device *ndev);
 
 int w5100_probe(struct device *dev, const struct w5100_ops *ops,
-               int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio);
+               int sizeof_ops_priv, const void *mac_addr, int irq,
+               int link_gpio);
 int w5100_remove(struct device *dev);
 
 extern const struct dev_pm_ops w5100_pm_ops;
index 6700a4dca7c82c55340b645a94e1a86d6590d41e..c270c5a54f3aa8a377877b72ade55146cbe7ec74 100644 (file)
@@ -158,7 +158,7 @@ enum rndis_device_state {
 };
 
 struct rndis_device {
-       struct netvsc_device *net_dev;
+       struct net_device *ndev;
 
        enum rndis_device_state state;
        bool link_state;
@@ -202,7 +202,7 @@ int rndis_filter_receive(struct hv_device *dev,
 int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
-void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf);
+void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
 
 #define NVSP_INVALID_PROTOCOL_VERSION  ((u32)0xFFFFFFFF)
 
@@ -653,6 +653,8 @@ struct garp_wrk {
 struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
+       /* netvsc_device */
+       struct netvsc_device *nvdev;
        /* reconfigure work */
        struct delayed_work dwork;
        /* last reconfig time */
@@ -672,17 +674,17 @@ struct net_device_context {
        /* Ethtool settings */
        u8 duplex;
        u32 speed;
+
+       /* the device is going away */
+       bool start_remove;
 };
 
 /* Per netvsc device */
 struct netvsc_device {
-       struct hv_device *dev;
-
        u32 nvsp_version;
 
        atomic_t num_outstanding_sends;
        wait_queue_head_t wait_drain;
-       bool start_remove;
        bool destroy;
 
        /* Receive buffer allocated by us but manages by NetVSP */
@@ -708,8 +710,6 @@ struct netvsc_device {
        struct nvsp_message revoke_packet;
        /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
 
-       struct net_device *ndev;
-
        struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
        u32 send_table[VRSS_SEND_TAB_SIZE];
        u32 max_chn;
@@ -732,9 +732,6 @@ struct netvsc_device {
        u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
        u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-       /* The net device context */
-       struct net_device_context *nd_ctx;
-
        /* 1: allocated, serial number is valid. 0: not allocated */
        u32 vf_alloc;
        /* Serial number of the VF to team with */
index eddce3cdafa8b5cdc7f0421cad77b55dec762068..719cb3578e55a53a5ef909eb16cfe15c5a718311 100644 (file)
  * Switch the data path from the synthetic interface to the VF
  * interface.
  */
-void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf)
+void netvsc_switch_datapath(struct net_device *ndev, bool vf)
 {
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct hv_device *dev = net_device_ctx->device_ctx;
+       struct netvsc_device *nv_dev = net_device_ctx->nvdev;
        struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
-       struct hv_device *dev = nv_dev->dev;
 
        memset(init_pkt, 0, sizeof(struct nvsp_message));
        init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
@@ -58,10 +60,9 @@ void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf)
 }
 
 
-static struct netvsc_device *alloc_net_device(struct hv_device *device)
+static struct netvsc_device *alloc_net_device(void)
 {
        struct netvsc_device *net_device;
-       struct net_device *ndev = hv_get_drvdata(device);
 
        net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
        if (!net_device)
@@ -74,19 +75,15 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
        }
 
        init_waitqueue_head(&net_device->wait_drain);
-       net_device->start_remove = false;
        net_device->destroy = false;
        atomic_set(&net_device->open_cnt, 0);
        atomic_set(&net_device->vf_use_cnt, 0);
-       net_device->dev = device;
-       net_device->ndev = ndev;
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
        net_device->vf_netdev = NULL;
        net_device->vf_inject = false;
 
-       hv_set_drvdata(device, net_device);
        return net_device;
 }
 
@@ -98,9 +95,10 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
 
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
-       struct netvsc_device *net_device;
+       struct net_device *ndev = hv_get_drvdata(device);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_device = net_device_ctx->nvdev;
 
-       net_device = hv_get_drvdata(device);
        if (net_device && net_device->destroy)
                net_device = NULL;
 
@@ -109,9 +107,9 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 
 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
 {
-       struct netvsc_device *net_device;
-
-       net_device = hv_get_drvdata(device);
+       struct net_device *ndev = hv_get_drvdata(device);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_device = net_device_ctx->nvdev;
 
        if (!net_device)
                goto get_in_err;
@@ -125,11 +123,13 @@ get_in_err:
 }
 
 
-static int netvsc_destroy_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct hv_device *device)
 {
        struct nvsp_message *revoke_packet;
        int ret = 0;
-       struct net_device *ndev = net_device->ndev;
+       struct net_device *ndev = hv_get_drvdata(device);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_device = net_device_ctx->nvdev;
 
        /*
         * If we got a section count, it means we received a
@@ -147,7 +147,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
                revoke_packet->msg.v1_msg.
                revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
 
-               ret = vmbus_sendpacket(net_device->dev->channel,
+               ret = vmbus_sendpacket(device->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
@@ -165,8 +165,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
 
        /* Teardown the gpadl on the vsp end */
        if (net_device->recv_buf_gpadl_handle) {
-               ret = vmbus_teardown_gpadl(net_device->dev->channel,
-                          net_device->recv_buf_gpadl_handle);
+               ret = vmbus_teardown_gpadl(device->channel,
+                                          net_device->recv_buf_gpadl_handle);
 
                /* If we failed here, we might as well return and have a leak
                 * rather than continue and a bugchk
@@ -207,7 +207,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
                revoke_packet->msg.v1_msg.revoke_send_buf.id =
                        NETVSC_SEND_BUFFER_ID;
 
-               ret = vmbus_sendpacket(net_device->dev->channel,
+               ret = vmbus_sendpacket(device->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
@@ -223,7 +223,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
        }
        /* Teardown the gpadl on the vsp end */
        if (net_device->send_buf_gpadl_handle) {
-               ret = vmbus_teardown_gpadl(net_device->dev->channel,
+               ret = vmbus_teardown_gpadl(device->channel,
                                           net_device->send_buf_gpadl_handle);
 
                /* If we failed here, we might as well return and have a leak
@@ -258,7 +258,7 @@ static int netvsc_init_buf(struct hv_device *device)
        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
-       ndev = net_device->ndev;
+       ndev = hv_get_drvdata(device);
 
        node = cpu_to_node(device->channel->target_cpu);
        net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
@@ -435,7 +435,7 @@ static int netvsc_init_buf(struct hv_device *device)
        goto exit;
 
 cleanup:
-       netvsc_destroy_buf(net_device);
+       netvsc_destroy_buf(device);
 
 exit:
        return ret;
@@ -448,6 +448,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
                              struct nvsp_message *init_packet,
                              u32 nvsp_ver)
 {
+       struct net_device *ndev = hv_get_drvdata(device);
        int ret;
        unsigned long t;
 
@@ -481,8 +482,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
        /* NVSPv2 or later: Send NDIS config */
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
-       init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
-                                                      ETH_HLEN;
+       init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
        if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
@@ -502,7 +502,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        int ndis_version;
-       struct net_device *ndev;
        u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
                NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
        int i, num_ver = 4; /* number of different NVSP versions */
@@ -510,7 +509,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
-       ndev = net_device->ndev;
 
        init_packet = &net_device->channel_init_pkt;
 
@@ -566,9 +564,9 @@ cleanup:
        return ret;
 }
 
-static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
+static void netvsc_disconnect_vsp(struct hv_device *device)
 {
-       netvsc_destroy_buf(net_device);
+       netvsc_destroy_buf(device);
 }
 
 /*
@@ -576,24 +574,13 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
  */
 int netvsc_device_remove(struct hv_device *device)
 {
-       struct netvsc_device *net_device;
-       unsigned long flags;
-
-       net_device = hv_get_drvdata(device);
-
-       netvsc_disconnect_vsp(net_device);
+       struct net_device *ndev = hv_get_drvdata(device);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_device = net_device_ctx->nvdev;
 
-       /*
-        * Since we have already drained, we don't need to busy wait
-        * as was done in final_release_stor_device()
-        * Note that we cannot set the ext pointer to NULL until
-        * we have drained - to drain the outgoing packets, we need to
-        * allow incoming packets.
-        */
+       netvsc_disconnect_vsp(device);
 
-       spin_lock_irqsave(&device->channel->inbound_lock, flags);
-       hv_set_drvdata(device, NULL);
-       spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+       net_device_ctx->nvdev = NULL;
 
        /*
         * At this point, no one should be accessing net_device
@@ -641,12 +628,11 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 {
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet *nvsc_packet;
-       struct net_device *ndev;
+       struct net_device *ndev = hv_get_drvdata(device);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
        u32 send_index;
        struct sk_buff *skb;
 
-       ndev = net_device->ndev;
-
        nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
                        (packet->offset8 << 3));
 
@@ -691,7 +677,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
                        wake_up(&net_device->wait_drain);
 
                if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-                   !net_device->start_remove &&
+                   !net_device_ctx->start_remove &&
                    (hv_ringbuf_avail_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
                                netif_tx_wake_queue(netdev_get_tx_queue(
@@ -775,6 +761,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 }
 
 static inline int netvsc_send_pkt(
+       struct hv_device *device,
        struct hv_netvsc_packet *packet,
        struct netvsc_device *net_device,
        struct hv_page_buffer **pb,
@@ -783,7 +770,7 @@ static inline int netvsc_send_pkt(
        struct nvsp_message nvmsg;
        u16 q_idx = packet->q_idx;
        struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
-       struct net_device *ndev = net_device->ndev;
+       struct net_device *ndev = hv_get_drvdata(device);
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
@@ -978,7 +965,8 @@ int netvsc_send(struct hv_device *device,
        }
 
        if (msd_send) {
-               m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
+               m_ret = netvsc_send_pkt(device, msd_send, net_device,
+                                       NULL, msd_skb);
 
                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
@@ -989,7 +977,7 @@ int netvsc_send(struct hv_device *device,
 
 send_now:
        if (cur_send)
-               ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
+               ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
        if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
                netvsc_free_send_slot(net_device, section_index);
@@ -1005,9 +993,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
        struct nvsp_message recvcompMessage;
        int retries = 0;
        int ret;
-       struct net_device *ndev;
-
-       ndev = net_device->ndev;
+       struct net_device *ndev = hv_get_drvdata(device);
 
        recvcompMessage.hdr.msg_type =
                                NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
@@ -1054,11 +1040,9 @@ static void netvsc_receive(struct netvsc_device *net_device,
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
-       struct net_device *ndev;
+       struct net_device *ndev = hv_get_drvdata(device);
        void *data;
 
-       ndev = net_device->ndev;
-
        /*
         * All inbound packets other than send completion should be xfer page
         * packet
@@ -1114,14 +1098,13 @@ static void netvsc_send_table(struct hv_device *hdev,
                              struct nvsp_message *nvmsg)
 {
        struct netvsc_device *nvscdev;
-       struct net_device *ndev;
+       struct net_device *ndev = hv_get_drvdata(hdev);
        int i;
        u32 count, *tab;
 
        nvscdev = get_outbound_net_device(hdev);
        if (!nvscdev)
                return;
-       ndev = nvscdev->ndev;
 
        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
@@ -1180,7 +1163,7 @@ void netvsc_channel_cb(void *context)
        net_device = get_inbound_net_device(device);
        if (!net_device)
                return;
-       ndev = net_device->ndev;
+       ndev = hv_get_drvdata(device);
        buffer = get_per_channel_state(channel);
 
        do {
@@ -1253,30 +1236,19 @@ void netvsc_channel_cb(void *context)
  */
 int netvsc_device_add(struct hv_device *device, void *additional_info)
 {
-       int ret = 0;
+       int i, ret = 0;
        int ring_size =
        ((struct netvsc_device_info *)additional_info)->ring_size;
        struct netvsc_device *net_device;
-       struct net_device *ndev;
+       struct net_device *ndev = hv_get_drvdata(device);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
-       net_device = alloc_net_device(device);
+       net_device = alloc_net_device();
        if (!net_device)
                return -ENOMEM;
 
        net_device->ring_size = ring_size;
 
-       /*
-        * Coming into this function, struct net_device * is
-        * registered as the driver private data.
-        * In alloc_net_device(), we register struct netvsc_device *
-        * as the driver private data and stash away struct net_device *
-        * in struct netvsc_device *.
-        */
-       ndev = net_device->ndev;
-
-       /* Add netvsc_device context to netvsc_device */
-       net_device->nd_ctx = netdev_priv(ndev);
-
        /* Initialize the NetVSC channel extension */
        init_completion(&net_device->channel_init_wait);
 
@@ -1295,7 +1267,19 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
        /* Channel is opened */
        pr_info("hv_netvsc channel opened successfully\n");
 
-       net_device->chn_table[0] = device->channel;
+       /* If we're reopening the device we may have multiple queues, fill the
+        * chn_table with the default channel to use it before subchannels are
+        * opened.
+        */
+       for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+               net_device->chn_table[i] = device->channel;
+
+       /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
+        * populated.
+        */
+       wmb();
+
+       net_device_ctx->nvdev = net_device;
 
        /* Connect with the NetVsp */
        ret = netvsc_connect_vsp(device);
index ba3f3f3d48efcb27b34b1aa6be1786d6839751c3..6a69b5cc9fe271c5017408da65b13de9ad533e96 100644 (file)
@@ -67,18 +67,19 @@ static void do_set_multicast(struct work_struct *w)
 {
        struct net_device_context *ndevctx =
                container_of(w, struct net_device_context, work);
-       struct netvsc_device *nvdev;
+       struct hv_device *device_obj = ndevctx->device_ctx;
+       struct net_device *ndev = hv_get_drvdata(device_obj);
+       struct netvsc_device *nvdev = ndevctx->nvdev;
        struct rndis_device *rdev;
 
-       nvdev = hv_get_drvdata(ndevctx->device_ctx);
-       if (nvdev == NULL || nvdev->ndev == NULL)
+       if (!nvdev)
                return;
 
        rdev = nvdev->extension;
        if (rdev == NULL)
                return;
 
-       if (nvdev->ndev->flags & IFF_PROMISC)
+       if (ndev->flags & IFF_PROMISC)
                rndis_filter_set_packet_filter(rdev,
                        NDIS_PACKET_TYPE_PROMISCUOUS);
        else
@@ -99,7 +100,7 @@ static int netvsc_open(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
-       struct netvsc_device *nvdev;
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
        struct rndis_device *rdev;
        int ret = 0;
 
@@ -114,7 +115,6 @@ static int netvsc_open(struct net_device *net)
 
        netif_tx_wake_all_queues(net);
 
-       nvdev = hv_get_drvdata(device_obj);
        rdev = nvdev->extension;
        if (!rdev->link_state)
                netif_carrier_on(net);
@@ -126,7 +126,7 @@ static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
-       struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
        int ret;
        u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;
@@ -205,8 +205,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                        void *accel_priv, select_queue_fallback_t fallback)
 {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-       struct hv_device *hdev =  net_device_ctx->device_ctx;
-       struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+       struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
        u32 hash;
        u16 q_idx = 0;
 
@@ -580,7 +579,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
        struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
-       struct netvsc_device *net_device;
        struct netvsc_reconfig *event;
        unsigned long flags;
 
@@ -590,8 +588,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
            indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
                return;
 
-       net_device = hv_get_drvdata(device_obj);
-       net = net_device->ndev;
+       net = hv_get_drvdata(device_obj);
 
        if (!net || net->reg_state != NETREG_REGISTERED)
                return;
@@ -659,16 +656,15 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                                struct vmbus_channel *channel,
                                u16 vlan_tci)
 {
-       struct net_device *net;
-       struct net_device_context *net_device_ctx;
+       struct net_device *net = hv_get_drvdata(device_obj);
+       struct net_device_context *net_device_ctx = netdev_priv(net);
        struct sk_buff *skb;
        struct sk_buff *vf_skb;
        struct netvsc_stats *rx_stats;
-       struct netvsc_device *netvsc_dev = hv_get_drvdata(device_obj);
+       struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
        u32 bytes_recvd = packet->total_data_buflen;
        int ret = 0;
 
-       net = netvsc_dev->ndev;
        if (!net || net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;
 
@@ -743,8 +739,7 @@ static void netvsc_get_channels(struct net_device *net,
                                struct ethtool_channels *channel)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct hv_device *dev = net_device_ctx->device_ctx;
-       struct netvsc_device *nvdev = hv_get_drvdata(dev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
 
        if (nvdev) {
                channel->max_combined   = nvdev->max_chn;
@@ -757,14 +752,14 @@ static int netvsc_set_channels(struct net_device *net,
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
-       struct netvsc_device *nvdev = hv_get_drvdata(dev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
        struct netvsc_device_info device_info;
        u32 num_chn;
        u32 max_chn;
        int ret = 0;
        bool recovering = false;
 
-       if (!nvdev || nvdev->destroy)
+       if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
                return -ENODEV;
 
        num_chn = nvdev->num_chn;
@@ -793,14 +788,11 @@ static int netvsc_set_channels(struct net_device *net,
                goto out;
 
  do_set:
-       nvdev->start_remove = true;
+       net_device_ctx->start_remove = true;
        rndis_filter_device_remove(dev);
 
        nvdev->num_chn = channels->combined_count;
 
-       net_device_ctx->device_ctx = dev;
-       hv_set_drvdata(dev, net);
-
        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
        device_info.ring_size = ring_size;
@@ -815,7 +807,7 @@ static int netvsc_set_channels(struct net_device *net,
                goto recover;
        }
 
-       nvdev = hv_get_drvdata(dev);
+       nvdev = net_device_ctx->nvdev;
 
        ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
        if (ret) {
@@ -837,6 +829,9 @@ static int netvsc_set_channels(struct net_device *net,
 
  out:
        netvsc_open(net);
+       net_device_ctx->start_remove = false;
+       /* We may have missed link change notifications */
+       schedule_delayed_work(&net_device_ctx->dwork, 0);
 
        return ret;
 
@@ -905,14 +900,14 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
        struct net_device_context *ndevctx = netdev_priv(ndev);
-       struct hv_device *hdev =  ndevctx->device_ctx;
-       struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+       struct netvsc_device *nvdev = ndevctx->nvdev;
+       struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
        int limit = ETH_DATA_LEN;
        u32 num_chn;
        int ret = 0;
 
-       if (nvdev == NULL || nvdev->destroy)
+       if (ndevctx->start_remove || !nvdev || nvdev->destroy)
                return -ENODEV;
 
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
@@ -927,14 +922,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 
        num_chn = nvdev->num_chn;
 
-       nvdev->start_remove = true;
+       ndevctx->start_remove = true;
        rndis_filter_device_remove(hdev);
 
        ndev->mtu = mtu;
 
-       ndevctx->device_ctx = hdev;
-       hv_set_drvdata(hdev, ndev);
-
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = num_chn;
@@ -943,6 +935,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 
 out:
        netvsc_open(ndev);
+       ndevctx->start_remove = false;
+
+       /* We may have missed link change notifications */
+       schedule_delayed_work(&ndevctx->dwork, 0);
 
        return ret;
 }
@@ -1055,18 +1051,22 @@ static const struct net_device_ops device_ops = {
  */
 static void netvsc_link_change(struct work_struct *w)
 {
-       struct net_device_context *ndev_ctx;
-       struct net_device *net;
+       struct net_device_context *ndev_ctx =
+               container_of(w, struct net_device_context, dwork.work);
+       struct hv_device *device_obj = ndev_ctx->device_ctx;
+       struct net_device *net = hv_get_drvdata(device_obj);
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
        struct netvsc_reconfig *event = NULL;
        bool notify = false, reschedule = false;
        unsigned long flags, next_reconfig, delay;
 
-       ndev_ctx = container_of(w, struct net_device_context, dwork.work);
-       net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+       rtnl_lock();
+       if (ndev_ctx->start_remove)
+               goto out_unlock;
+
+       net_device = ndev_ctx->nvdev;
        rdev = net_device->extension;
-       net = net_device->ndev;
 
        next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
        if (time_is_after_jiffies(next_reconfig)) {
@@ -1077,7 +1077,7 @@ static void netvsc_link_change(struct work_struct *w)
                delay = next_reconfig - jiffies;
                delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
                schedule_delayed_work(&ndev_ctx->dwork, delay);
-               return;
+               goto out_unlock;
        }
        ndev_ctx->last_reconfig = jiffies;
 
@@ -1091,9 +1091,7 @@ static void netvsc_link_change(struct work_struct *w)
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);
 
        if (!event)
-               return;
-
-       rtnl_lock();
+               goto out_unlock;
 
        switch (event->event) {
                /* Only the following events are possible due to the check in
@@ -1142,6 +1140,11 @@ static void netvsc_link_change(struct work_struct *w)
         */
        if (reschedule)
                schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+
+       return;
+
+out_unlock:
+       rtnl_unlock();
 }
 
 static void netvsc_free_netdev(struct net_device *netdev)
@@ -1164,10 +1167,9 @@ static void netvsc_notify_peers(struct work_struct *wrk)
        atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
 }
 
-static struct netvsc_device *get_netvsc_device(char *mac)
+static struct net_device *get_netvsc_net_device(char *mac)
 {
-       struct net_device *dev;
-       struct net_device_context *netvsc_ctx = NULL;
+       struct net_device *dev, *found = NULL;
        int rtnl_locked;
 
        rtnl_locked = rtnl_trylock();
@@ -1176,21 +1178,20 @@ static struct netvsc_device *get_netvsc_device(char *mac)
                if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
                        if (dev->netdev_ops != &device_ops)
                                continue;
-                       netvsc_ctx = netdev_priv(dev);
+                       found = dev;
                        break;
                }
        }
        if (rtnl_locked)
                rtnl_unlock();
 
-       if (netvsc_ctx == NULL)
-               return NULL;
-
-       return hv_get_drvdata(netvsc_ctx->device_ctx);
+       return found;
 }
 
 static int netvsc_register_vf(struct net_device *vf_netdev)
 {
+       struct net_device *ndev;
+       struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
        const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
 
@@ -1202,11 +1203,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
         * associate with the VF interface. If we don't find a matching
         * synthetic interface, move on.
         */
-       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+       ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+       if (!ndev)
+               return NOTIFY_DONE;
+
+       net_device_ctx = netdev_priv(ndev);
+       netvsc_dev = net_device_ctx->nvdev;
        if (netvsc_dev == NULL)
                return NOTIFY_DONE;
 
-       netdev_info(netvsc_dev->ndev, "VF registering: %s\n", vf_netdev->name);
+       netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
        /*
         * Take a reference on the module.
         */
@@ -1218,6 +1224,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 
 static int netvsc_vf_up(struct net_device *vf_netdev)
 {
+       struct net_device *ndev;
        struct netvsc_device *netvsc_dev;
        const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
        struct net_device_context *net_device_ctx;
@@ -1225,13 +1232,17 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
        if (eth_ops == &ethtool_ops)
                return NOTIFY_DONE;
 
-       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+       ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+       if (!ndev)
+               return NOTIFY_DONE;
+
+       net_device_ctx = netdev_priv(ndev);
+       netvsc_dev = net_device_ctx->nvdev;
 
        if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
                return NOTIFY_DONE;
 
-       netdev_info(netvsc_dev->ndev, "VF up: %s\n", vf_netdev->name);
-       net_device_ctx = netdev_priv(netvsc_dev->ndev);
+       netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
        netvsc_dev->vf_inject = true;
 
        /*
@@ -1242,11 +1253,10 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
        /*
         * notify the host to switch the data path.
         */
-       netvsc_switch_datapath(netvsc_dev, true);
-       netdev_info(netvsc_dev->ndev, "Data path switched to VF: %s\n",
-                   vf_netdev->name);
+       netvsc_switch_datapath(ndev, true);
+       netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
 
-       netif_carrier_off(netvsc_dev->ndev);
+       netif_carrier_off(ndev);
 
        /*
         * Now notify peers. We are scheduling work to
@@ -1264,6 +1274,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 
 static int netvsc_vf_down(struct net_device *vf_netdev)
 {
+       struct net_device *ndev;
        struct netvsc_device *netvsc_dev;
        struct net_device_context *net_device_ctx;
        const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
@@ -1271,13 +1282,17 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
        if (eth_ops == &ethtool_ops)
                return NOTIFY_DONE;
 
-       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+       ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+       if (!ndev)
+               return NOTIFY_DONE;
+
+       net_device_ctx = netdev_priv(ndev);
+       netvsc_dev = net_device_ctx->nvdev;
 
        if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
                return NOTIFY_DONE;
 
-       netdev_info(netvsc_dev->ndev, "VF down: %s\n", vf_netdev->name);
-       net_device_ctx = netdev_priv(netvsc_dev->ndev);
+       netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
        netvsc_dev->vf_inject = false;
        /*
         * Wait for currently active users to
@@ -1286,16 +1301,15 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 
        while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
                udelay(50);
-       netvsc_switch_datapath(netvsc_dev, false);
-       netdev_info(netvsc_dev->ndev, "Data path switched from VF: %s\n",
-                   vf_netdev->name);
+       netvsc_switch_datapath(ndev, false);
+       netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
        rndis_filter_close(net_device_ctx->device_ctx);
-       netif_carrier_on(netvsc_dev->ndev);
+       netif_carrier_on(ndev);
        /*
         * Notify peers.
         */
        atomic_inc(&netvsc_dev->vf_use_cnt);
-       net_device_ctx->gwrk.netdev = netvsc_dev->ndev;
+       net_device_ctx->gwrk.netdev = ndev;
        net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
        schedule_work(&net_device_ctx->gwrk.dwrk);
 
@@ -1305,17 +1319,23 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 
 static int netvsc_unregister_vf(struct net_device *vf_netdev)
 {
+       struct net_device *ndev;
        struct netvsc_device *netvsc_dev;
        const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+       struct net_device_context *net_device_ctx;
 
        if (eth_ops == &ethtool_ops)
                return NOTIFY_DONE;
 
-       netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+       ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+       if (!ndev)
+               return NOTIFY_DONE;
+
+       net_device_ctx = netdev_priv(ndev);
+       netvsc_dev = net_device_ctx->nvdev;
        if (netvsc_dev == NULL)
                return NOTIFY_DONE;
-       netdev_info(netvsc_dev->ndev, "VF unregistering: %s\n",
-                   vf_netdev->name);
+       netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
 
        netvsc_dev->vf_netdev = NULL;
        module_put(THIS_MODULE);
@@ -1358,6 +1378,9 @@ static int netvsc_probe(struct hv_device *dev,
        }
 
        hv_set_drvdata(dev, net);
+
+       net_device_ctx->start_remove = false;
+
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
        INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
@@ -1389,7 +1412,7 @@ static int netvsc_probe(struct hv_device *dev,
        }
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
-       nvdev = hv_get_drvdata(dev);
+       nvdev = net_device_ctx->nvdev;
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);
 
@@ -1411,17 +1434,24 @@ static int netvsc_remove(struct hv_device *dev)
        struct net_device_context *ndev_ctx;
        struct netvsc_device *net_device;
 
-       net_device = hv_get_drvdata(dev);
-       net = net_device->ndev;
+       net = hv_get_drvdata(dev);
 
        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }
 
-       net_device->start_remove = true;
 
        ndev_ctx = netdev_priv(net);
+       net_device = ndev_ctx->nvdev;
+
+       /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
+        * removing the device.
+        */
+       rtnl_lock();
+       ndev_ctx->start_remove = true;
+       rtnl_unlock();
+
        cancel_delayed_work_sync(&ndev_ctx->dwork);
        cancel_work_sync(&ndev_ctx->work);
 
@@ -1436,6 +1466,8 @@ static int netvsc_remove(struct hv_device *dev)
         */
        rndis_filter_device_remove(dev);
 
+       hv_set_drvdata(dev, NULL);
+
        netvsc_free_netdev(net);
        return 0;
 }
index a59cdebc9b4b32c116326d66b2de0d475247e543..97c292b7dbea0c06cca508935c1323bbd9ebedd9 100644 (file)
@@ -126,11 +126,7 @@ static void put_rndis_request(struct rndis_device *dev,
 static void dump_rndis_message(struct hv_device *hv_dev,
                        struct rndis_message *rndis_msg)
 {
-       struct net_device *netdev;
-       struct netvsc_device *net_device;
-
-       net_device = hv_get_drvdata(hv_dev);
-       netdev = net_device->ndev;
+       struct net_device *netdev = hv_get_drvdata(hv_dev);
 
        switch (rndis_msg->ndis_msg_type) {
        case RNDIS_MSG_PACKET:
@@ -211,6 +207,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        struct hv_netvsc_packet *packet;
        struct hv_page_buffer page_buf[2];
        struct hv_page_buffer *pb = page_buf;
+       struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 
        /* Setup the packet to send it */
        packet = &req->pkt;
@@ -236,7 +233,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
                        pb[0].len;
        }
 
-       ret = netvsc_send(dev->net_dev->dev, packet, NULL, &pb, NULL);
+       ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL);
        return ret;
 }
 
@@ -262,9 +259,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
        struct rndis_request *request = NULL;
        bool found = false;
        unsigned long flags;
-       struct net_device *ndev;
-
-       ndev = dev->net_dev->ndev;
+       struct net_device *ndev = dev->ndev;
 
        spin_lock_irqsave(&dev->request_lock, flags);
        list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,6 +350,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
        struct ndis_pkt_8021q_info *vlan;
        struct ndis_tcp_ip_checksum_info *csum_info;
        u16 vlan_tci = 0;
+       struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 
        rndis_pkt = &msg->msg.pkt;
 
@@ -368,7 +364,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
         * should be the data packet size plus the trailer padding size
         */
        if (pkt->total_data_buflen < rndis_pkt->data_len) {
-               netdev_err(dev->net_dev->ndev, "rndis message buffer "
+               netdev_err(dev->ndev, "rndis message buffer "
                           "overflow detected (got %u, min %u)"
                           "...dropping this message!\n",
                           pkt->total_data_buflen, rndis_pkt->data_len);
@@ -390,7 +386,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
        }
 
        csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-       return netvsc_recv_callback(dev->net_dev->dev, pkt, data,
+       return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
                                    csum_info, channel, vlan_tci);
 }
 
@@ -399,10 +395,11 @@ int rndis_filter_receive(struct hv_device *dev,
                                void **data,
                                struct vmbus_channel *channel)
 {
-       struct netvsc_device *net_dev = hv_get_drvdata(dev);
+       struct net_device *ndev = hv_get_drvdata(dev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_dev = net_device_ctx->nvdev;
        struct rndis_device *rndis_dev;
        struct rndis_message *rndis_msg;
-       struct net_device *ndev;
        int ret = 0;
 
        if (!net_dev) {
@@ -410,8 +407,6 @@ int rndis_filter_receive(struct hv_device *dev,
                goto exit;
        }
 
-       ndev = net_dev->ndev;
-
        /* Make sure the rndis device state is initialized */
        if (!net_dev->extension) {
                netdev_err(ndev, "got rndis message but no rndis device - "
@@ -430,7 +425,7 @@ int rndis_filter_receive(struct hv_device *dev,
 
        rndis_msg = *data;
 
-       if (netif_msg_rx_err(net_dev->nd_ctx))
+       if (netif_msg_rx_err(net_device_ctx))
                dump_rndis_message(dev, rndis_msg);
 
        switch (rndis_msg->ndis_msg_type) {
@@ -550,9 +545,10 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
 
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
 {
-       struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+       struct net_device *ndev = hv_get_drvdata(hdev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
        struct rndis_device *rdev = nvdev->extension;
-       struct net_device *ndev = nvdev->ndev;
        struct rndis_request *request;
        struct rndis_set_request *set;
        struct rndis_config_parameter_info *cpi;
@@ -629,9 +625,10 @@ static int
 rndis_filter_set_offload_params(struct hv_device *hdev,
                                struct ndis_offload_params *req_offloads)
 {
-       struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+       struct net_device *ndev = hv_get_drvdata(hdev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
        struct rndis_device *rdev = nvdev->extension;
-       struct net_device *ndev = nvdev->ndev;
        struct rndis_request *request;
        struct rndis_set_request *set;
        struct ndis_offload_params *offload_params;
@@ -703,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = {
 
 static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
 {
-       struct net_device *ndev = rdev->net_dev->ndev;
+       struct net_device *ndev = rdev->ndev;
        struct rndis_request *request;
        struct rndis_set_request *set;
        struct rndis_set_complete *set_complete;
@@ -799,9 +796,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        u32 status;
        int ret;
        unsigned long t;
-       struct net_device *ndev;
-
-       ndev = dev->net_dev->ndev;
+       struct net_device *ndev = dev->ndev;
 
        request = get_rndis_request(dev, RNDIS_MSG_SET,
                        RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -856,7 +851,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
        u32 status;
        int ret;
        unsigned long t;
-       struct netvsc_device *nvdev = dev->net_dev;
+       struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
 
        request = get_rndis_request(dev, RNDIS_MSG_INIT,
                        RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -879,7 +875,6 @@ static int rndis_filter_init_device(struct rndis_device *dev)
                goto cleanup;
        }
 
-
        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 
        if (t == 0) {
@@ -910,8 +905,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 {
        struct rndis_request *request;
        struct rndis_halt_request *halt;
-       struct netvsc_device *nvdev = dev->net_dev;
-       struct hv_device *hdev = nvdev->dev;
+       struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
+       struct hv_device *hdev = net_device_ctx->device_ctx;
        ulong flags;
 
        /* Attempt to do a rndis device halt */
@@ -979,13 +975,14 @@ static int rndis_filter_close_device(struct rndis_device *dev)
 
 static void netvsc_sc_open(struct vmbus_channel *new_sc)
 {
-       struct netvsc_device *nvscdev;
+       struct net_device *ndev =
+               hv_get_drvdata(new_sc->primary_channel->device_obj);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *nvscdev = net_device_ctx->nvdev;
        u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
        int ret;
        unsigned long flags;
 
-       nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
-
        if (chn_index >= nvscdev->num_chn)
                return;
 
@@ -1010,6 +1007,8 @@ int rndis_filter_device_add(struct hv_device *dev,
                                  void *additional_info)
 {
        int ret;
+       struct net_device *net = hv_get_drvdata(dev);
+       struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *net_device;
        struct rndis_device *rndis_device;
        struct netvsc_device_info *device_info = additional_info;
@@ -1040,16 +1039,15 @@ int rndis_filter_device_add(struct hv_device *dev,
                return ret;
        }
 
-
        /* Initialize the rndis device */
-       net_device = hv_get_drvdata(dev);
+       net_device = net_device_ctx->nvdev;
        net_device->max_chn = 1;
        net_device->num_chn = 1;
 
        spin_lock_init(&net_device->sc_lock);
 
        net_device->extension = rndis_device;
-       rndis_device->net_dev = net_device;
+       rndis_device->ndev = net;
 
        /* Send the rndis initialization message */
        ret = rndis_filter_init_device(rndis_device);
@@ -1063,8 +1061,8 @@ int rndis_filter_device_add(struct hv_device *dev,
        ret = rndis_filter_query_device(rndis_device,
                                        RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
                                        &mtu, &size);
-       if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
-               net_device->ndev->mtu = mtu;
+       if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+               net->mtu = mtu;
 
        /* Get the mac address */
        ret = rndis_filter_query_device_mac(rndis_device);
@@ -1198,7 +1196,9 @@ err_dev_remv:
 
 void rndis_filter_device_remove(struct hv_device *dev)
 {
-       struct netvsc_device *net_dev = hv_get_drvdata(dev);
+       struct net_device *ndev = hv_get_drvdata(dev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_dev = net_device_ctx->nvdev;
        struct rndis_device *rndis_dev = net_dev->extension;
        unsigned long t;
 
@@ -1224,7 +1224,9 @@ void rndis_filter_device_remove(struct hv_device *dev)
 
 int rndis_filter_open(struct hv_device *dev)
 {
-       struct netvsc_device *net_device = hv_get_drvdata(dev);
+       struct net_device *ndev = hv_get_drvdata(dev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *net_device = net_device_ctx->nvdev;
 
        if (!net_device)
                return -EINVAL;
@@ -1237,7 +1239,9 @@ int rndis_filter_open(struct hv_device *dev)
 
 int rndis_filter_close(struct hv_device *dev)
 {
-       struct netvsc_device *nvdev = hv_get_drvdata(dev);
+       struct net_device *ndev = hv_get_drvdata(dev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
+       struct netvsc_device *nvdev = net_device_ctx->nvdev;
 
        if (!nvdev)
                return -EINVAL;
index f6078376ef500f286364e40c0c10bc70ecb3358a..b9fde1bcf0f0c302ed79d97c6a717d5f1ddfe7fe 100644 (file)
@@ -80,23 +80,15 @@ static int lxt970_ack_interrupt(struct phy_device *phydev)
 
 static int lxt970_config_intr(struct phy_device *phydev)
 {
-       int err;
-
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-               err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+               return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
        else
-               err = phy_write(phydev, MII_LXT970_IER, 0);
-
-       return err;
+               return phy_write(phydev, MII_LXT970_IER, 0);
 }
 
 static int lxt970_config_init(struct phy_device *phydev)
 {
-       int err;
-
-       err = phy_write(phydev, MII_LXT970_CONFIG, 0);
-
-       return err;
+       return phy_write(phydev, MII_LXT970_CONFIG, 0);
 }
 
 
@@ -112,14 +104,10 @@ static int lxt971_ack_interrupt(struct phy_device *phydev)
 
 static int lxt971_config_intr(struct phy_device *phydev)
 {
-       int err;
-
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-               err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+               return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
        else
-               err = phy_write(phydev, MII_LXT971_IER, 0);
-
-       return err;
+               return phy_write(phydev, MII_LXT971_IER, 0);
 }
 
 /*
index 603e8db50162d5c40fc7dfcb6ca93309f2eb1202..c5dc2c363f96fc296120c29eb7c700e25f619e15 100644 (file)
@@ -871,9 +871,11 @@ void phy_start(struct phy_device *phydev)
                break;
        case PHY_HALTED:
                /* make sure interrupts are re-enabled for the PHY */
-               err = phy_enable_interrupts(phydev);
-               if (err < 0)
-                       break;
+               if (phydev->irq != PHY_POLL) {
+                       err = phy_enable_interrupts(phydev);
+                       if (err < 0)
+                               break;
+               }
 
                phydev->state = PHY_RESUMING;
                do_resume = true;
index e977ba931878e77cb149fd5a66c5618f723500a3..307f72a0f2e2da531d12157db7ed1ea0878c2fdf 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 #include <linux/of.h>
+#include <linux/gpio/consumer.h>
 
 #include <asm/irq.h>
 
@@ -1570,9 +1571,16 @@ static int phy_probe(struct device *dev)
        struct device_driver *drv = phydev->mdio.dev.driver;
        struct phy_driver *phydrv = to_phy_driver(drv);
        int err = 0;
+       struct gpio_descs *reset_gpios;
 
        phydev->drv = phydrv;
 
+       /* take phy out of reset */
+       reset_gpios = devm_gpiod_get_array_optional(dev, "reset",
+                                                   GPIOD_OUT_LOW);
+       if (IS_ERR(reset_gpios))
+               return PTR_ERR(reset_gpios);
+
        /* Disable the interrupt if the PHY doesn't support it
         * but the interrupt is still a valid one
         */
index 0ea29345eb2e92cd2fb4be55584d9f0410444ac1..dff08842f26d034dc21a7a73b8a1c9b0cb72236a 100644 (file)
@@ -43,8 +43,8 @@
 #define DRV_VERSION    "1.0"
 
 struct net_vrf {
-       struct rtable           *rth;
-       struct rt6_info         *rt6;
+       struct rtable __rcu     *rth;
+       struct rt6_info __rcu   *rt6;
        u32                     tb_id;
 };
 
@@ -273,10 +273,15 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+/* holding rtnl */
 static void vrf_rt6_release(struct net_vrf *vrf)
 {
-       dst_release(&vrf->rt6->dst);
-       vrf->rt6 = NULL;
+       struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+
+       rcu_assign_pointer(vrf->rt6, NULL);
+
+       if (rt6)
+               dst_release(&rt6->dst);
 }
 
 static int vrf_rt6_create(struct net_device *dev)
@@ -300,7 +305,8 @@ static int vrf_rt6_create(struct net_device *dev)
 
        rt6->rt6i_table = rt6i_table;
        rt6->dst.output = vrf_output6;
-       vrf->rt6 = rt6;
+       rcu_assign_pointer(vrf->rt6, rt6);
+
        rc = 0;
 out:
        return rc;
@@ -374,29 +380,35 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
+/* holding rtnl */
 static void vrf_rtable_release(struct net_vrf *vrf)
 {
-       struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+       struct rtable *rth = rtnl_dereference(vrf->rth);
+
+       rcu_assign_pointer(vrf->rth, NULL);
 
-       dst_release(dst);
-       vrf->rth = NULL;
+       if (rth)
+               dst_release(&rth->dst);
 }
 
-static struct rtable *vrf_rtable_create(struct net_device *dev)
+static int vrf_rtable_create(struct net_device *dev)
 {
        struct net_vrf *vrf = netdev_priv(dev);
        struct rtable *rth;
 
        if (!fib_new_table(dev_net(dev), vrf->tb_id))
-               return NULL;
+               return -ENOMEM;
 
        rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
-       if (rth) {
-               rth->dst.output = vrf_output;
-               rth->rt_table_id = vrf->tb_id;
-       }
+       if (!rth)
+               return -ENOMEM;
 
-       return rth;
+       rth->dst.output = vrf_output;
+       rth->rt_table_id = vrf->tb_id;
+
+       rcu_assign_pointer(vrf->rth, rth);
+
+       return 0;
 }
 
 /**************************** device handling ********************/
@@ -484,8 +496,7 @@ static int vrf_dev_init(struct net_device *dev)
                goto out_nomem;
 
        /* create the default dst which points back to us */
-       vrf->rth = vrf_rtable_create(dev);
-       if (!vrf->rth)
+       if (vrf_rtable_create(dev) != 0)
                goto out_stats;
 
        if (vrf_rt6_create(dev) != 0)
@@ -528,8 +539,13 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
        if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
                struct net_vrf *vrf = netdev_priv(dev);
 
-               rth = vrf->rth;
-               dst_hold(&rth->dst);
+               rcu_read_lock();
+
+               rth = rcu_dereference(vrf->rth);
+               if (likely(rth))
+                       dst_hold(&rth->dst);
+
+               rcu_read_unlock();
        }
 
        return rth;
@@ -665,16 +681,24 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
 static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
                                         const struct flowi6 *fl6)
 {
-       struct rt6_info *rt = NULL;
+       struct dst_entry *dst = NULL;
 
        if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
                struct net_vrf *vrf = netdev_priv(dev);
+               struct rt6_info *rt;
+
+               rcu_read_lock();
+
+               rt = rcu_dereference(vrf->rt6);
+               if (likely(rt)) {
+                       dst = &rt->dst;
+                       dst_hold(dst);
+               }
 
-               rt = vrf->rt6;
-               dst_hold(&rt->dst);
+               rcu_read_unlock();
        }
 
-       return (struct dst_entry *)rt;
+       return dst;
 }
 #endif
 
index 2f29d20aa08f661c81630e7a3b4b922ed1b61966..25ab6bf013c4d2d12aa7619d77628f4793ef6f64 100644 (file)
@@ -1381,6 +1381,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
                if (!vxlan_set_mac(vxlan, vs, skb))
                        goto drop;
        } else {
+               skb_reset_mac_header(skb);
                skb->dev = vxlan->dev;
                skb->pkt_type = PACKET_HOST;
        }
index bd286fca3776b0f52b95818b848e7d7668988a8e..880210917a6f6606d16e43c02bd978b4edd0e94e 100644 (file)
@@ -442,10 +442,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-                     int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
+                     struct ieee80211_tx_info *info, int hdrlen,
+                     struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
 
@@ -465,10 +466,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
-       memset(&info->status, 0, sizeof(info->status));
-       memset(info->driver_data, 0, sizeof(info->driver_data));
+       memset(&skb_info->status, 0, sizeof(skb_info->status));
+       memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 
-       info->driver_data[1] = dev_cmd;
+       skb_info->driver_data[1] = dev_cmd;
 
        return dev_cmd;
 }
@@ -476,22 +477,25 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info info;
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
-       if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
+       memcpy(&info, skb->cb, sizeof(info));
+
+       if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
 
-       if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
-                        (!info->control.vif ||
-                         info->hw_queue != info->control.vif->cab_queue)))
+       if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+                        (!info.control.vif ||
+                         info.hw_queue != info.control.vif->cab_queue)))
                return -1;
 
        /* This holds the amsdu headers length */
-       info->driver_data[0] = (void *)(uintptr_t)0;
+       skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
        /*
         * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
@@ -500,7 +504,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * and hence needs to be sent on the aux queue
         */
        if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-           info->control.vif->type == NL80211_IFTYPE_STATION)
+           info.control.vif->type == NL80211_IFTYPE_STATION)
                IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
        /*
@@ -513,14 +517,14 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * AUX station.
         */
        sta_id = mvm->aux_sta.sta_id;
-       if (info->control.vif) {
+       if (info.control.vif) {
                struct iwl_mvm_vif *mvmvif =
-                       iwl_mvm_vif_from_mac80211(info->control.vif);
+                       iwl_mvm_vif_from_mac80211(info.control.vif);
 
-               if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-                   info->control.vif->type == NL80211_IFTYPE_AP)
+               if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                   info.control.vif->type == NL80211_IFTYPE_AP)
                        sta_id = mvmvif->bcast_sta.sta_id;
-               else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+               else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
                         is_multicast_ether_addr(hdr->addr1)) {
                        u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
 
@@ -529,19 +533,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                }
        }
 
-       IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
+       IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;
 
-       /* From now on, we cannot access info->control */
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdrlen);
 
-       if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
+       if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                return -1;
        }
@@ -560,11 +563,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 
 #ifdef CONFIG_INET
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        struct sk_buff *tmp, *next;
@@ -673,6 +676,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        /* This skb fits in one single A-MSDU */
        if (num_subframes * mss >= tcp_payload_len) {
+               struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
                /*
                 * Compute the length of all the data added for the A-MSDU.
                 * This will be used to compute the length to write in the TX
@@ -681,11 +686,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                 * already had one set of SNAP / IP / TCP headers.
                 */
                num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-               info = IEEE80211_SKB_CB(skb);
                amsdu_add = num_subframes * sizeof(struct ethhdr) +
                        (num_subframes - 1) * (snap_ip_tcp + pad);
                /* This holds the amsdu headers length */
-               info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+               skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
 
                __skb_queue_tail(mpdus_skb, skb);
                return 0;
@@ -725,11 +729,14 @@ segment:
                        ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 
                if (tcp_payload_len > mss) {
+                       struct ieee80211_tx_info *skb_info =
+                               IEEE80211_SKB_CB(tmp);
+
                        num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-                       info = IEEE80211_SKB_CB(tmp);
                        amsdu_add = num_subframes * sizeof(struct ethhdr) +
                                (num_subframes - 1) * (snap_ip_tcp + pad);
-                       info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+                       skb_info->driver_data[0] =
+                               (void *)(uintptr_t)amsdu_add;
                        skb_shinfo(tmp)->gso_size = mss;
                } else {
                        qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -751,6 +758,7 @@ segment:
 }
 #else /* CONFIG_INET */
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
 {
@@ -794,10 +802,10 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
  * Sets the fields in the Tx cmd that are crypto related
  */
 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+                          struct ieee80211_tx_info *info,
                           struct ieee80211_sta *sta)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_mvm_sta *mvmsta;
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
@@ -818,7 +826,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+                                       sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;
 
@@ -918,7 +927,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info info;
        struct sk_buff_head mpdus_skbs;
        unsigned int payload_len;
        int ret;
@@ -929,21 +939,23 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
+       memcpy(&info, skb->cb, sizeof(info));
+
        /* This holds the amsdu headers length */
-       info->driver_data[0] = (void *)(uintptr_t)0;
+       skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
        if (!skb_is_gso(skb))
-               return iwl_mvm_tx_mpdu(mvm, skb, sta);
+               return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
        payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
                tcp_hdrlen(skb) + skb->data_len;
 
        if (payload_len <= skb_shinfo(skb)->gso_size)
-               return iwl_mvm_tx_mpdu(mvm, skb, sta);
+               return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
        __skb_queue_head_init(&mpdus_skbs);
 
-       ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs);
+       ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
        if (ret)
                return ret;
 
@@ -953,7 +965,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        while (!skb_queue_empty(&mpdus_skbs)) {
                skb = __skb_dequeue(&mpdus_skbs);
 
-               ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
+               ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
                if (ret) {
                        __skb_queue_purge(&mpdus_skbs);
                        return ret;
index e346e8125ef5d4d1797199853f7247043251ee6c..11e02be9db1a0b1ed5e6116bb2d1c3dd3b68fda4 100644 (file)
@@ -1,3 +1,3 @@
 obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
 
-xen-netback-y := netback.o xenbus.o interface.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o
index f44b388464208d25b4d11613a2f8b16862256ec4..84d6cbdd11b2d870c9e6be8a470402bd84ebfb8b 100644 (file)
@@ -220,6 +220,35 @@ struct xenvif_mcast_addr {
 
 #define XEN_NETBK_MCAST_MAX 64
 
+#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
+#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
+#define XEN_NETBK_HASH_TAG_SIZE 40
+
+/* One cached hash result: maps a packet tuple ("tag") to its Toeplitz
+ * hash value so the hash need not be recomputed for every packet.
+ */
+struct xenvif_hash_cache_entry {
+       struct list_head link;
+       struct rcu_head rcu;
+       u8 tag[XEN_NETBK_HASH_TAG_SIZE];
+       unsigned int len;       /* number of valid bytes in tag[] */
+       u32 val;                /* cached hash value for tag */
+       int seq;                /* LRU sequence number; smallest is evicted first */
+};
+
+/* Per-vif cache of recently computed hashes, RCU-read / spinlock-write. */
+struct xenvif_hash_cache {
+       spinlock_t lock;
+       struct list_head list;
+       unsigned int count;     /* current number of entries (bounded by
+                                * xenvif_hash_cache_size)
+                                */
+       atomic_t seq;           /* source of LRU sequence numbers */
+};
+
+/* Frontend-configured hashing state (algorithm, key, queue mapping). */
+struct xenvif_hash {
+       unsigned int alg;       /* XEN_NETIF_CTRL_HASH_ALGORITHM_* */
+       u32 flags;              /* enabled XEN_NETIF_CTRL_HASH_TYPE_* bits */
+       u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
+       u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+       unsigned int size;      /* number of valid entries in mapping[] */
+       struct xenvif_hash_cache cache;
+};
+
 struct xenvif {
        /* Unique identifier for this interface. */
        domid_t          domid;
@@ -251,6 +280,8 @@ struct xenvif {
        unsigned int num_queues; /* active queues, resource allocated */
        unsigned int stalled_queues;
 
+       struct xenvif_hash hash;
+
        struct xenbus_watch credit_watch;
        struct xenbus_watch mcast_ctrl_watch;
 
@@ -260,6 +291,11 @@ struct xenvif {
        struct dentry *xenvif_dbg_root;
 #endif
 
+       struct xen_netif_ctrl_back_ring ctrl;
+       struct task_struct *ctrl_task;
+       wait_queue_head_t ctrl_wq;
+       unsigned int ctrl_irq;
+
        /* Miscellaneous private stuff. */
        struct net_device *dev;
 };
@@ -285,10 +321,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
 int xenvif_init_queue(struct xenvif_queue *queue);
 void xenvif_deinit_queue(struct xenvif_queue *queue);
 
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
-                  unsigned long rx_ring_ref, unsigned int tx_evtchn,
-                  unsigned int rx_evtchn);
-void xenvif_disconnect(struct xenvif *vif);
+int xenvif_connect_data(struct xenvif_queue *queue,
+                       unsigned long tx_ring_ref,
+                       unsigned long rx_ring_ref,
+                       unsigned int tx_evtchn,
+                       unsigned int rx_evtchn);
+void xenvif_disconnect_data(struct xenvif *vif);
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+                       unsigned int evtchn);
+void xenvif_disconnect_ctrl(struct xenvif *vif);
 void xenvif_free(struct xenvif *vif);
 
 int xenvif_xenbus_init(void);
@@ -300,10 +341,10 @@ int xenvif_queue_stopped(struct xenvif_queue *queue);
 void xenvif_wake_queue(struct xenvif_queue *queue);
 
 /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
-                             grant_ref_t tx_ring_ref,
-                             grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+                                  grant_ref_t tx_ring_ref,
+                                  grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
@@ -318,6 +359,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
+int xenvif_ctrl_kthread(void *data);
+
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
@@ -341,6 +384,7 @@ extern bool separate_tx_rx_irq;
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_stall_timeout_msecs;
 extern unsigned int xenvif_max_queues;
+extern unsigned int xenvif_hash_cache_size;
 
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *xen_netback_dbg_root;
@@ -354,4 +398,18 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
 bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
 void xenvif_mcast_addr_list_free(struct xenvif *vif);
 
+/* Hash */
+void xenvif_init_hash(struct xenvif *vif);
+void xenvif_deinit_hash(struct xenvif *vif);
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+                           u32 off);
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
new file mode 100644 (file)
index 0000000..392e392
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2016 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define XEN_NETIF_DEFINE_TOEPLITZ
+
+#include "common.h"
+#include <linux/vmalloc.h>
+#include <linux/rculist.h>
+
+/* RCU callback: free a cache entry once no reader can still see it. */
+static void xenvif_del_hash(struct rcu_head *rcu)
+{
+       struct xenvif_hash_cache_entry *entry;
+
+       entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);
+
+       kfree(entry);
+}
+
+/* Insert a freshly computed hash value into the cache, evicting the
+ * least-recently-used entry if the cache would exceed
+ * xenvif_hash_cache_size. Allocation failure is silently ignored: the
+ * cache is only an optimization, the hash has already been computed.
+ * NOTE(review): caller context appears to allow sleeping (GFP_KERNEL)
+ * even though the list is also walked from IRQ-safe sections — confirm.
+ */
+static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
+                           unsigned int len, u32 val)
+{
+       struct xenvif_hash_cache_entry *new, *entry, *oldest;
+       unsigned long flags;
+       bool found;
+
+       /* Allocate and fill outside the lock to keep the critical
+        * section short.
+        */
+       new = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!new)
+               return;
+
+       memcpy(new->tag, tag, len);
+       new->len = len;
+       new->val = val;
+
+       spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+       found = false;
+       oldest = NULL;
+       list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+               /* Make sure we don't add duplicate entries */
+               if (entry->len == len &&
+                   memcmp(entry->tag, tag, len) == 0)
+                       found = true;
+               /* Track the entry with the smallest LRU sequence number */
+               if (!oldest || entry->seq < oldest->seq)
+                       oldest = entry;
+       }
+
+       if (!found) {
+               new->seq = atomic_inc_return(&vif->hash.cache.seq);
+               list_add_rcu(&new->link, &vif->hash.cache.list);
+
+               /* Evict the LRU entry if the cache has grown too big */
+               if (++vif->hash.cache.count > xenvif_hash_cache_size) {
+                       list_del_rcu(&oldest->link);
+                       vif->hash.cache.count--;
+                       call_rcu(&oldest->rcu, xenvif_del_hash);
+               }
+       }
+
+       spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+
+       /* A concurrent insert beat us to it: discard the duplicate */
+       if (found)
+               kfree(new);
+}
+
+/* Compute the Toeplitz hash of @data with the frontend-supplied key
+ * and, if caching is enabled, remember the result for later lookups.
+ */
+static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
+                          unsigned int len)
+{
+       u32 val;
+
+       val = xen_netif_toeplitz_hash(vif->hash.key,
+                                     sizeof(vif->hash.key),
+                                     data, len);
+
+       if (xenvif_hash_cache_size != 0)
+               xenvif_add_hash(vif, data, len, val);
+
+       return val;
+}
+
+/* Drop every cached hash value, e.g. after the key has been changed.
+ * Entries are unlinked under the lock and freed after a grace period.
+ */
+static void xenvif_flush_hash(struct xenvif *vif)
+{
+       struct xenvif_hash_cache_entry *entry;
+       unsigned long flags;
+
+       if (xenvif_hash_cache_size == 0)
+               return;
+
+       spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+       list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+               list_del_rcu(&entry->link);
+               vif->hash.cache.count--;
+               call_rcu(&entry->rcu, xenvif_del_hash);
+       }
+
+       spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+}
+
+/* Look up the hash of @data in the cache, computing (and caching) it
+ * on a miss. Returns 0 if the tag is too long to be cached at all.
+ */
+static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
+                           unsigned int len)
+{
+       struct xenvif_hash_cache_entry *entry;
+       u32 val;
+       bool found;
+
+       if (len >= XEN_NETBK_HASH_TAG_SIZE)
+               return 0;
+
+       /* Caching disabled: always compute directly */
+       if (xenvif_hash_cache_size == 0)
+               return xenvif_new_hash(vif, data, len);
+
+       rcu_read_lock();
+
+       found = false;
+
+       list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+               if (entry->len == len &&
+                   memcmp(entry->tag, data, len) == 0) {
+                       val = entry->val;
+                       /* Refresh the LRU sequence number on a hit.
+                        * NOTE(review): written without the cache lock;
+                        * presumably an approximate LRU is acceptable.
+                        */
+                       entry->seq = atomic_inc_return(&vif->hash.cache.seq);
+                       found = true;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+
+       if (!found)
+               val = xenvif_new_hash(vif, data, len);
+
+       return val;
+}
+
+/* Compute and attach a receive hash to @skb according to the hash
+ * types the frontend enabled (IPv4/IPv6, with or without TCP ports).
+ * An L4 hash covers addresses + TCP ports; an L3 hash addresses only.
+ * If no enabled type matches the packet, any existing hash is cleared
+ * so stale values are never forwarded to the frontend.
+ */
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
+{
+       struct flow_keys flow;
+       u32 hash = 0;
+       enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+       u32 flags = vif->hash.flags;
+       bool has_tcp_hdr;
+
+       /* Quick rejection test: If the network protocol doesn't
+        * correspond to any enabled hash type then there's no point
+        * in parsing the packet header.
+        */
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+                            XEN_NETIF_CTRL_HASH_TYPE_IPV4))
+                       break;
+
+               goto done;
+
+       case htons(ETH_P_IPV6):
+               if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
+                            XEN_NETIF_CTRL_HASH_TYPE_IPV6))
+                       break;
+
+               goto done;
+
+       default:
+               goto done;
+       }
+
+       memset(&flow, 0, sizeof(flow));
+       if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+               goto done;
+
+       /* Only hash over ports when there is a full, unfragmented TCP
+        * header to take them from.
+        */
+       has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
+                     !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               if (has_tcp_hdr &&
+                   (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
+                       /* 4-tuple: src addr, dst addr, src port, dst port */
+                       u8 data[12];
+
+                       memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+                       memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+                       memcpy(&data[8], &flow.ports.src, 2);
+                       memcpy(&data[10], &flow.ports.dst, 2);
+
+                       hash = xenvif_find_hash(vif, data, sizeof(data));
+                       type = PKT_HASH_TYPE_L4;
+               } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
+                       /* 2-tuple: src addr, dst addr */
+                       u8 data[8];
+
+                       memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+                       memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+
+                       hash = xenvif_find_hash(vif, data, sizeof(data));
+                       type = PKT_HASH_TYPE_L3;
+               }
+
+               break;
+
+       case htons(ETH_P_IPV6):
+               if (has_tcp_hdr &&
+                   (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
+                       /* 4-tuple: 16-byte addresses plus TCP ports */
+                       u8 data[36];
+
+                       memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+                       memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+                       memcpy(&data[32], &flow.ports.src, 2);
+                       memcpy(&data[34], &flow.ports.dst, 2);
+
+                       hash = xenvif_find_hash(vif, data, sizeof(data));
+                       type = PKT_HASH_TYPE_L4;
+               } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
+                       /* 2-tuple: 16-byte src and dst addresses */
+                       u8 data[32];
+
+                       memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+                       memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+
+                       hash = xenvif_find_hash(vif, data, sizeof(data));
+                       type = PKT_HASH_TYPE_L3;
+               }
+
+               break;
+       }
+
+done:
+       if (type == PKT_HASH_TYPE_NONE)
+               skb_clear_hash(skb);
+       else
+               __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
+}
+
+/* Control-ring op: select the hash algorithm. Only NONE and TOEPLITZ
+ * are recognized; anything else is rejected.
+ */
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
+{
+       switch (alg) {
+       case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+       case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+               break;
+
+       default:
+               return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       }
+
+       vif->hash.alg = alg;
+
+       return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+/* Control-ring op: report the hash types this backend supports.
+ * Fails if no hash algorithm has been selected yet.
+ */
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
+{
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+               return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+
+       *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+                XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+                XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+                XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+
+       return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+/* Control-ring op: enable a set of hash types. Rejects unknown bits
+ * and rejects any setting while the algorithm is still NONE.
+ */
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
+{
+       if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+                     XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+                     XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+                     XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
+               return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+               return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       vif->hash.flags = flags;
+
+       return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+/* Control-ring op: copy a new Toeplitz key from the frontend via the
+ * supplied grant reference, zero-pad the remainder of the key buffer,
+ * and invalidate all cached hash values computed with the old key.
+ */
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
+{
+       u8 *key = vif->hash.key;
+       struct gnttab_copy copy_op = {
+               .source.u.ref = gref,
+               .source.domid = vif->domid,
+               .dest.u.gmfn = virt_to_gfn(key),
+               .dest.domid = DOMID_SELF,
+               .dest.offset = xen_offset_in_page(key),
+               .len = len,
+               .flags = GNTCOPY_source_gref
+       };
+
+       if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
+               return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       if (len != 0) {
+               gnttab_batch_copy(&copy_op, 1);
+
+               if (copy_op.status != GNTST_okay)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       }
+
+       /* Clear any remaining key octets */
+       if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
+               memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);
+
+       /* Old cached values were computed with the old key: drop them */
+       xenvif_flush_hash(vif);
+
+       return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+/* Control-ring op: set the number of entries in the hash->queue
+ * mapping table and zero the table (all hashes map to queue 0 until a
+ * new mapping is installed).
+ */
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
+{
+       if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
+               return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       vif->hash.size = size;
+       memset(vif->hash.mapping, 0, sizeof(u32) * size);
+
+       return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+/* Control-ring op: install @len entries of the hash->queue mapping
+ * table, starting at @off, copied from the frontend via grant @gref.
+ *
+ * Fixes relative to the previous version (cf. the upstream XSA-270
+ * correction):
+ *  - guard against u32 overflow of off + len, which let the bounds
+ *    check be bypassed;
+ *  - perform the grant copy *before* validating, so the values that
+ *    are actually installed get checked (previously stale data was
+ *    validated and the copied-in queue indices never were);
+ *  - index from the table base instead of double-applying @off
+ *    (mapping already pointed at entry @off but was indexed with
+ *    mapping[off++], reading out of bounds);
+ *  - drop the broken "if (len != 0)" test that ran after len had
+ *    already been decremented to 0xFFFFFFFF by the validation loop.
+ */
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+                           u32 off)
+{
+       u32 *mapping = vif->hash.mapping;
+       struct gnttab_copy copy_op = {
+               .source.u.ref = gref,
+               .source.domid = vif->domid,
+               .dest.u.gmfn = virt_to_gfn(mapping + off),
+               .dest.domid = DOMID_SELF,
+               .dest.offset = xen_offset_in_page(mapping + off),
+               .len = len * sizeof(*mapping),
+               .flags = GNTCOPY_source_gref
+       };
+
+       /* Reject wrap-around of off + len, out-of-range updates, and
+        * copies larger than a single page.
+        */
+       if ((off + len < off) || (off + len > vif->hash.size) ||
+           len > XEN_PAGE_SIZE / sizeof(*mapping))
+               return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       if (len != 0) {
+               gnttab_batch_copy(&copy_op, 1);
+
+               if (copy_op.status != GNTST_okay)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       }
+
+       /* Validate the newly copied entries: each must name a queue
+        * that actually exists.
+        */
+       while (len-- != 0)
+               if (mapping[off++] >= vif->num_queues)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+/* Initialize the per-vif hash cache (no-op when caching is disabled
+ * via the xenvif_hash_cache_size module parameter).
+ */
+void xenvif_init_hash(struct xenvif *vif)
+{
+       if (xenvif_hash_cache_size == 0)
+               return;
+
+       spin_lock_init(&vif->hash.cache.lock);
+       INIT_LIST_HEAD(&vif->hash.cache.list);
+}
+
+/* Tear down the hash state by releasing every cached entry. */
+void xenvif_deinit_hash(struct xenvif *vif)
+{
+       xenvif_flush_hash(vif);
+}
index f5231a2dd2ac961089029c8acd40f97c3cb7991b..1c7f49b5acc17805f5983ed41839472fa8c8d5d8 100644 (file)
@@ -128,6 +128,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
+{
+       struct xenvif *vif = dev_id;
+
+       wake_up(&vif->ctrl_wq);
+
+       return IRQ_HANDLED;
+}
+
 int xenvif_queue_stopped(struct xenvif_queue *queue)
 {
        struct net_device *dev = queue->vif->dev;
@@ -142,6 +151,33 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+                              void *accel_priv,
+                              select_queue_fallback_t fallback)
+{
+       struct xenvif *vif = netdev_priv(dev);
+       unsigned int size = vif->hash.size;
+
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+               u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+               /* Make sure there is no hash information in the socket
+                * buffer otherwise it would be incorrectly forwarded
+                * to the frontend.
+                */
+               skb_clear_hash(skb);
+
+               return index;
+       }
+
+       xenvif_set_skb_hash(vif, skb);
+
+       if (size == 0)
+               return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
+
+       return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
@@ -386,6 +422,7 @@ static const struct ethtool_ops xenvif_ethtool_ops = {
 };
 
 static const struct net_device_ops xenvif_netdev_ops = {
+       .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats  = xenvif_get_stats,
        .ndo_open       = xenvif_open,
@@ -527,9 +564,69 @@ void xenvif_carrier_on(struct xenvif *vif)
        rtnl_unlock();
 }
 
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
-                  unsigned long rx_ring_ref, unsigned int tx_evtchn,
-                  unsigned int rx_evtchn)
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+                       unsigned int evtchn)
+{
+       struct net_device *dev = vif->dev;
+       void *addr;
+       struct xen_netif_ctrl_sring *shared;
+       struct task_struct *task;
+       int err = -ENOMEM;
+
+       err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+                                    &ring_ref, 1, &addr);
+       if (err)
+               goto err;
+
+       shared = (struct xen_netif_ctrl_sring *)addr;
+       BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+
+       init_waitqueue_head(&vif->ctrl_wq);
+
+       err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
+                                                   xenvif_ctrl_interrupt,
+                                                   0, dev->name, vif);
+       if (err < 0)
+               goto err_unmap;
+
+       vif->ctrl_irq = err;
+
+       xenvif_init_hash(vif);
+
+       task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
+                             "%s-control", dev->name);
+       if (IS_ERR(task)) {
+               pr_warn("Could not allocate kthread for %s\n", dev->name);
+               err = PTR_ERR(task);
+               goto err_deinit;
+       }
+
+       get_task_struct(task);
+       vif->ctrl_task = task;
+
+       wake_up_process(vif->ctrl_task);
+
+       return 0;
+
+err_deinit:
+       xenvif_deinit_hash(vif);
+       unbind_from_irqhandler(vif->ctrl_irq, vif);
+       vif->ctrl_irq = 0;
+
+err_unmap:
+       xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+                               vif->ctrl.sring);
+       vif->ctrl.sring = NULL;
+
+err:
+       return err;
+}
+
+int xenvif_connect_data(struct xenvif_queue *queue,
+                       unsigned long tx_ring_ref,
+                       unsigned long rx_ring_ref,
+                       unsigned int tx_evtchn,
+                       unsigned int rx_evtchn)
 {
        struct task_struct *task;
        int err = -ENOMEM;
@@ -538,7 +635,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);
 
-       err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
+       err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
+                                            rx_ring_ref);
        if (err < 0)
                goto err;
 
@@ -614,7 +712,7 @@ err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
 err_unmap:
-       xenvif_unmap_frontend_rings(queue);
+       xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
 err:
        module_put(THIS_MODULE);
@@ -634,7 +732,7 @@ void xenvif_carrier_off(struct xenvif *vif)
        rtnl_unlock();
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_disconnect_data(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
@@ -668,12 +766,34 @@ void xenvif_disconnect(struct xenvif *vif)
                        queue->tx_irq = 0;
                }
 
-               xenvif_unmap_frontend_rings(queue);
+               xenvif_unmap_frontend_data_rings(queue);
        }
 
        xenvif_mcast_addr_list_free(vif);
 }
 
+void xenvif_disconnect_ctrl(struct xenvif *vif)
+{
+       if (vif->ctrl_task) {
+               kthread_stop(vif->ctrl_task);
+               put_task_struct(vif->ctrl_task);
+               vif->ctrl_task = NULL;
+       }
+
+       xenvif_deinit_hash(vif);
+
+       if (vif->ctrl_irq) {
+               unbind_from_irqhandler(vif->ctrl_irq, vif);
+               vif->ctrl_irq = 0;
+       }
+
+       if (vif->ctrl.sring) {
+               xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+                                       vif->ctrl.sring);
+               vif->ctrl.sring = NULL;
+       }
+}
+
 /* Reverse the relevant parts of xenvif_init_queue().
  * Used for queue teardown from xenvif_free(), and on the
  * error handling paths in xenbus.c:connect().
index b42f26029225fd69d9c57fe3088b31fdb5ab39bb..edbae0b1e8f0ed8e9be929767c01cecf838fa5fd 100644 (file)
@@ -89,6 +89,11 @@ module_param(fatal_skb_slots, uint, 0444);
  */
 #define XEN_NETBACK_TX_COPY_LEN 128
 
+/* This is the maximum number of flows in the hash cache. */
+#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
+unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
+module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
+MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);
@@ -163,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
                needed++;
+       if (skb->sw_hash)
+               needed++;
 
        do {
                prod = queue->rx.sring->req_prod;
@@ -280,6 +287,8 @@ struct gop_frag_copy {
        struct xenvif_rx_meta *meta;
        int head;
        int gso_type;
+       int protocol;
+       int hash_present;
 
        struct page *page;
 };
@@ -326,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
        npo->copy_off += *len;
        info->meta->size += *len;
 
+       if (!info->head)
+               return;
+
        /* Leave a gap for the GSO descriptor. */
-       if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+       if ((1 << info->gso_type) & queue->vif->gso_mask)
+               queue->rx.req_cons++;
+
+       /* Leave a gap for the hash extra segment. */
+       if (info->hash_present)
                queue->rx.req_cons++;
 
        info->head = 0; /* There must be something in this buffer now */
@@ -362,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                .npo = npo,
                .head = *head,
                .gso_type = XEN_NETIF_GSO_TYPE_NONE,
+               /* xenvif_set_skb_hash() will have either set a s/w
+                * hash or cleared the hash depending on
+                * whether the frontend wants a hash for this skb.
+                */
+               .hash_present = skb->sw_hash,
        };
        unsigned long bytes;
 
@@ -550,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
 
 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+       struct xenvif *vif = queue->vif;
        s8 status;
        u16 flags;
        struct xen_netif_rx_response *resp;
@@ -585,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
        gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               struct xen_netif_extra_info *extra = NULL;
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_prefix_mask) {
+                   vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&queue->rx,
                                                 queue->rx.rsp_prod_pvt++);
 
@@ -605,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                queue->stats.tx_bytes += skb->len;
                queue->stats.tx_packets++;
 
-               status = xenvif_check_gop(queue->vif,
+               status = xenvif_check_gop(vif,
                                          XENVIF_RX_CB(skb)->meta_slots_used,
                                          &npo);
 
@@ -627,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                                        flags);
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_mask) {
-                       struct xen_netif_extra_info *gso =
-                               (struct xen_netif_extra_info *)
+                   vif->gso_mask) {
+                       extra = (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&queue->rx,
                                                  queue->rx.rsp_prod_pvt++);
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
-                       gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-                       gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-                       gso->u.gso.pad = 0;
-                       gso->u.gso.features = 0;
+                       extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+                       extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+                       extra->u.gso.pad = 0;
+                       extra->u.gso.features = 0;
 
-                       gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-                       gso->flags = 0;
+                       extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+                       extra->flags = 0;
+               }
+
+               if (skb->sw_hash) {
+                       /* Since the skb got here via xenvif_select_queue()
+                        * we know that the hash has been re-calculated
+                        * according to a configuration set by the frontend
+                        * and therefore we know that it is legitimate to
+                        * pass it to the frontend.
+                        */
+                       if (resp->flags & XEN_NETRXF_extra_info)
+                               extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+                       else
+                               resp->flags |= XEN_NETRXF_extra_info;
+
+                       extra = (struct xen_netif_extra_info *)
+                               RING_GET_RESPONSE(&queue->rx,
+                                                 queue->rx.rsp_prod_pvt++);
+
+                       extra->u.hash.algorithm =
+                               XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+                       if (skb->l4_hash)
+                               extra->u.hash.type =
+                                       skb->protocol == htons(ETH_P_IP) ?
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+                       else
+                               extra->u.hash.type =
+                                       skb->protocol == htons(ETH_P_IP) ?
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+                       *(uint32_t *)extra->u.hash.value =
+                               skb_get_hash_raw(skb);
+
+                       extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+                       extra->flags = 0;
                }
 
                xenvif_add_frag_responses(queue, status,
@@ -711,6 +770,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
                if (cons == end)
                        break;
                RING_COPY_REQUEST(&queue->tx, cons++, txp);
+               extra_count = 0; /* only the first frag can have extras */
        } while (1);
        queue->tx.req_cons = cons;
 }
@@ -1450,6 +1510,33 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        }
                }
 
+               if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
+                       struct xen_netif_extra_info *extra;
+                       enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+
+                       extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
+
+                       switch (extra->u.hash.type) {
+                       case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
+                       case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
+                               type = PKT_HASH_TYPE_L3;
+                               break;
+
+                       case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
+                       case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
+                               type = PKT_HASH_TYPE_L4;
+                               break;
+
+                       default:
+                               break;
+                       }
+
+                       if (type != PKT_HASH_TYPE_NONE)
+                               skb_set_hash(skb,
+                                            *(u32 *)extra->u.hash.value,
+                                            type);
+               }
+
                XENVIF_TX_CB(skb)->pending_idx = pending_idx;
 
                __skb_put(skb, data_len);
@@ -1925,7 +2012,7 @@ static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
        return queue->dealloc_cons != queue->dealloc_prod;
 }
 
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
 {
        if (queue->tx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
@@ -1935,9 +2022,9 @@ void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
                                        queue->rx.sring);
 }
 
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
-                             grant_ref_t tx_ring_ref,
-                             grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+                                  grant_ref_t tx_ring_ref,
+                                  grant_ref_t rx_ring_ref)
 {
        void *addr;
        struct xen_netif_tx_sring *txs;
@@ -1964,7 +2051,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
        return 0;
 
 err:
-       xenvif_unmap_frontend_rings(queue);
+       xenvif_unmap_frontend_data_rings(queue);
        return err;
 }
 
@@ -2163,6 +2250,135 @@ int xenvif_dealloc_kthread(void *data)
        return 0;
 }
 
+static void make_ctrl_response(struct xenvif *vif,
+                              const struct xen_netif_ctrl_request *req,
+                              u32 status, u32 data)
+{
+       RING_IDX idx = vif->ctrl.rsp_prod_pvt;
+       struct xen_netif_ctrl_response rsp = {
+               .id = req->id,
+               .type = req->type,
+               .status = status,
+               .data = data,
+       };
+
+       *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
+       vif->ctrl.rsp_prod_pvt = ++idx;
+}
+
+static void push_ctrl_response(struct xenvif *vif)
+{
+       int notify;
+
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
+       if (notify)
+               notify_remote_via_irq(vif->ctrl_irq);
+}
+
+static void process_ctrl_request(struct xenvif *vif,
+                                const struct xen_netif_ctrl_request *req)
+{
+       u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+       u32 data = 0;
+
+       switch (req->type) {
+       case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
+               status = xenvif_set_hash_alg(vif, req->data[0]);
+               break;
+
+       case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
+               status = xenvif_get_hash_flags(vif, &data);
+               break;
+
+       case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
+               status = xenvif_set_hash_flags(vif, req->data[0]);
+               break;
+
+       case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
+               status = xenvif_set_hash_key(vif, req->data[0],
+                                            req->data[1]);
+               break;
+
+       case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
+               status = XEN_NETIF_CTRL_STATUS_SUCCESS;
+               data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
+               break;
+
+       case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
+               status = xenvif_set_hash_mapping_size(vif,
+                                                     req->data[0]);
+               break;
+
+       case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
+               status = xenvif_set_hash_mapping(vif, req->data[0],
+                                                req->data[1],
+                                                req->data[2]);
+               break;
+
+       default:
+               break;
+       }
+
+       make_ctrl_response(vif, req, status, data);
+       push_ctrl_response(vif);
+}
+
+static void xenvif_ctrl_action(struct xenvif *vif)
+{
+       for (;;) {
+               RING_IDX req_prod, req_cons;
+
+               req_prod = vif->ctrl.sring->req_prod;
+               req_cons = vif->ctrl.req_cons;
+
+               /* Make sure we can see requests before we process them. */
+               rmb();
+
+               if (req_cons == req_prod)
+                       break;
+
+               while (req_cons != req_prod) {
+                       struct xen_netif_ctrl_request req;
+
+                       RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
+                       req_cons++;
+
+                       process_ctrl_request(vif, &req);
+               }
+
+               vif->ctrl.req_cons = req_cons;
+               vif->ctrl.sring->req_event = req_cons + 1;
+       }
+}
+
+static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+{
+       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
+               return 1;
+
+       return 0;
+}
+
+int xenvif_ctrl_kthread(void *data)
+{
+       struct xenvif *vif = data;
+
+       for (;;) {
+               wait_event_interruptible(vif->ctrl_wq,
+                                        xenvif_ctrl_work_todo(vif) ||
+                                        kthread_should_stop());
+               if (kthread_should_stop())
+                       break;
+
+               while (xenvif_ctrl_work_todo(vif))
+                       xenvif_ctrl_action(vif);
+
+               cond_resched();
+       }
+
+       return 0;
+}
+
 static int __init netback_init(void)
 {
        int rc = 0;
index bd182cd55dda87c76b3984bbec8cfef493284dd1..6a31f2610c2378185c4274cd9828cd5117ca61ea 100644 (file)
@@ -38,7 +38,8 @@ struct backend_info {
        const char *hotplug_script;
 };
 
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static int connect_data_rings(struct backend_info *be,
+                             struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
 static int backend_create_xenvif(struct backend_info *be);
@@ -367,6 +368,12 @@ static int netback_probe(struct xenbus_device *dev,
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");
 
+       err = xenbus_printf(XBT_NIL, dev->nodename,
+                           "feature-ctrl-ring",
+                           "%u", true);
+       if (err)
+               pr_debug("Error writing feature-ctrl-ring\n");
+
        script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
        if (IS_ERR(script)) {
                err = PTR_ERR(script);
@@ -457,7 +464,8 @@ static void backend_disconnect(struct backend_info *be)
 #ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
-               xenvif_disconnect(be->vif);
+               xenvif_disconnect_data(be->vif);
+               xenvif_disconnect_ctrl(be->vif);
        }
 }
 
@@ -825,6 +833,48 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
        kfree(str);
 }
 
+static int connect_ctrl_ring(struct backend_info *be)
+{
+       struct xenbus_device *dev = be->dev;
+       struct xenvif *vif = be->vif;
+       unsigned int val;
+       grant_ref_t ring_ref;
+       unsigned int evtchn;
+       int err;
+
+       err = xenbus_gather(XBT_NIL, dev->otherend,
+                           "ctrl-ring-ref", "%u", &val, NULL);
+       if (err)
+               goto done; /* The frontend does not have a control ring */
+
+       ring_ref = val;
+
+       err = xenbus_gather(XBT_NIL, dev->otherend,
+                           "event-channel-ctrl", "%u", &val, NULL);
+       if (err) {
+               xenbus_dev_fatal(dev, err,
+                                "reading %s/event-channel-ctrl",
+                                dev->otherend);
+               goto fail;
+       }
+
+       evtchn = val;
+
+       err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
+       if (err) {
+               xenbus_dev_fatal(dev, err,
+                                "mapping shared-frame %u port %u",
+                                ring_ref, evtchn);
+               goto fail;
+       }
+
+done:
+       return 0;
+
+fail:
+       return err;
+}
+
 static void connect(struct backend_info *be)
 {
        int err;
@@ -861,6 +911,12 @@ static void connect(struct backend_info *be)
        xen_register_watchers(dev, be->vif);
        read_xenbus_vif_flags(be);
 
+       err = connect_ctrl_ring(be);
+       if (err) {
+               xenbus_dev_fatal(dev, err, "connecting control ring");
+               return;
+       }
+
        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
@@ -896,11 +952,12 @@ static void connect(struct backend_info *be)
                queue->remaining_credit = credit_bytes;
                queue->credit_usec = credit_usec;
 
-               err = connect_rings(be, queue);
+               err = connect_data_rings(be, queue);
                if (err) {
-                       /* connect_rings() cleans up after itself on failure,
-                        * but we need to clean up after xenvif_init_queue() here,
-                        * and also clean up any previously initialised queues.
+                       /* connect_data_rings() cleans up after itself on
+                        * failure, but we need to clean up after
+                        * xenvif_init_queue() here, and also clean up any
+                        * previously initialised queues.
                         */
                        xenvif_deinit_queue(queue);
                        be->vif->num_queues = queue_index;
@@ -935,15 +992,17 @@ static void connect(struct backend_info *be)
 
 err:
        if (be->vif->num_queues > 0)
-               xenvif_disconnect(be->vif); /* Clean up existing queues */
+               xenvif_disconnect_data(be->vif); /* Clean up existing queues */
        vfree(be->vif->queues);
        be->vif->queues = NULL;
        be->vif->num_queues = 0;
+       xenvif_disconnect_ctrl(be->vif);
        return;
 }
 
 
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
+static int connect_data_rings(struct backend_info *be,
+                             struct xenvif_queue *queue)
 {
        struct xenbus_device *dev = be->dev;
        unsigned int num_queues = queue->vif->num_queues;
@@ -1007,8 +1066,8 @@ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
        }
 
        /* Map the shared frame, irq etc. */
-       err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
-                            tx_evtchn, rx_evtchn);
+       err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
+                                 tx_evtchn, rx_evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err,
                                 "mapping shared-frames %lu/%lu port tx %u rx %u",
index 5101f3ab4f296a2edc0a290fc4ab87a3ba12f9bb..92f536596b24295f0b19aea551844eecb95f7df6 100644 (file)
@@ -402,9 +402,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 
                /*
                 * vmemmap_populate_hugepages() allocates the memmap array in
-                * HPAGE_SIZE chunks.
+                * PMD_SIZE chunks.
                 */
-               memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+               memmap_size = ALIGN(64 * npfns, PMD_SIZE);
                offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
                        - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
index 6c9f5467bc5f84e65fbe6e55d0e03693d260e535..dd7cdbee8029d5a51a60c63bb24a5772c0b84de9 100644 (file)
@@ -294,7 +294,7 @@ void pci_bus_add_device(struct pci_dev *dev)
 
        dev->match_driver = true;
        retval = device_attach(&dev->dev);
-       if (retval < 0) {
+       if (retval < 0 && retval != -EPROBE_DEFER) {
                dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
                pci_proc_detach_device(dev);
                pci_remove_sysfs_dev_files(dev);
@@ -324,7 +324,9 @@ void pci_bus_add_devices(const struct pci_bus *bus)
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
-               BUG_ON(!dev->is_added);
+               /* Skip if device attach failed */
+               if (!dev->is_added)
+                       continue;
                child = dev->subordinate;
                if (child)
                        pci_bus_add_devices(child);
index 4429312e848dba2af05ddb3d9da6cb3b2b176248..2c447130b954fa15421858a03dfe7cdf887de80c 100644 (file)
@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
                        break;
                case PIN_CONFIG_BIAS_PULL_UP:
                        conf |= ATMEL_PIO_PUEN_MASK;
+                       conf &= (~ATMEL_PIO_PDEN_MASK);
                        break;
                case PIN_CONFIG_BIAS_PULL_DOWN:
                        conf |= ATMEL_PIO_PDEN_MASK;
+                       conf &= (~ATMEL_PIO_PUEN_MASK);
                        break;
                case PIN_CONFIG_DRIVE_OPEN_DRAIN:
                        if (arg == 0)
index 40cd894e4df5e3d11f26442692dd9c0076d932af..514a5e8fdbab3e757b470daa6e579ebe489fa0f7 100644 (file)
@@ -157,7 +157,9 @@ static struct regulator_ops axp20x_ops_sw = {
 static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
        REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
        REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
-       REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+       REGULATOR_LINEAR_RANGE(2500000, 0x9, 0x9, 0),
+       REGULATOR_LINEAR_RANGE(2700000, 0xa, 0xb, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 0xc, 0xf, 100000),
 };
 
 static const struct regulator_desc axp20x_regulators[] = {
@@ -215,10 +217,14 @@ static const struct regulator_desc axp22x_regulators[] = {
                 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
        AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
                 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
-       AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
+       /* Note the datasheet only guarantees reliable operation up to
+        * 3.3V, this needs to be enforced via dts provided constraints */
+       AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
                    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
                    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
-       AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
+       /* Note the datasheet only guarantees reliable operation up to
+        * 3.3V, this needs to be enforced via dts provided constraints */
+       AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
                    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
                    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
        AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
index ed9e7e96f8777a291341e1dd45fccc413bee0f43..c6af343f54eac5c59b3e06923e3b4141413b833e 100644 (file)
@@ -900,4 +900,4 @@ module_exit(da9063_regulator_cleanup);
 MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>");
 MODULE_DESCRIPTION("DA9063 regulators driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("paltform:" DA9063_DRVNAME_REGULATORS);
+MODULE_ALIAS("platform:" DA9063_DRVNAME_REGULATORS);
index a8718e98674a273939f20d06627be943aab6aee7..83e89e5d47526c17be6d728fcc7a651f45aec4b0 100644 (file)
@@ -162,6 +162,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
        of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
 
        config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+       if (config->enable_gpio == -EPROBE_DEFER)
+               return ERR_PTR(-EPROBE_DEFER);
 
        /* Fetch GPIOs. - optional property*/
        ret = of_gpio_count(np);
index d24e2c783dc5c9a093220c4e98980813ad9e309f..6dfa3502e1f1a5ed7532d15157856c30e8d3bb7c 100644 (file)
@@ -308,7 +308,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-#define regulator_desc_s2mps11_buck6_10(num, min, step) {      \
+#define regulator_desc_s2mps11_buck67810(num, min, step) {     \
        .name           = "BUCK"#num,                           \
        .id             = S2MPS11_BUCK##num,                    \
        .ops            = &s2mps11_buck_ops,                    \
@@ -324,6 +324,22 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
+#define regulator_desc_s2mps11_buck9 {                         \
+       .name           = "BUCK9",                              \
+       .id             = S2MPS11_BUCK9,                        \
+       .ops            = &s2mps11_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = MIN_3000_MV,                          \
+       .uV_step        = STEP_25_MV,                           \
+       .n_voltages     = S2MPS11_BUCK9_N_VOLTAGES,             \
+       .ramp_delay     = S2MPS11_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPS11_REG_B9CTRL2,                  \
+       .vsel_mask      = S2MPS11_BUCK9_VSEL_MASK,              \
+       .enable_reg     = S2MPS11_REG_B9CTRL1,                  \
+       .enable_mask    = S2MPS11_ENABLE_MASK                   \
+}
+
 static const struct regulator_desc s2mps11_regulators[] = {
        regulator_desc_s2mps11_ldo(1, STEP_25_MV),
        regulator_desc_s2mps11_ldo(2, STEP_50_MV),
@@ -368,11 +384,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
        regulator_desc_s2mps11_buck1_4(3),
        regulator_desc_s2mps11_buck1_4(4),
        regulator_desc_s2mps11_buck5,
-       regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
-       regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
+       regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps11_buck9,
+       regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
 };
 
 static struct regulator_ops s2mps14_reg_ops;
index 8eaed0522aa36e83ddbe259a4fe2293c5ac61579..a655cf29c16f7d40367a859fb11f483e28252219 100644 (file)
@@ -532,6 +532,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                return SCSI_DH_DEV_TEMP_BUSY;
 
  retry:
+       err = 0;
        retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
 
        if (retval) {
index 5d0ec42a9317d6a08a2b99fd424339c4554f4787..634254a523013a557327a7b38825924de7e71b6c 100644 (file)
@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
        .eh_bus_reset_handler   = qla1280_eh_bus_reset,
        .eh_host_reset_handler  = qla1280_eh_adapter_reset,
        .bios_param             = qla1280_biosparam,
-       .can_queue              = 0xfffff,
+       .can_queue              = MAX_OUTSTANDING_COMMANDS,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .use_clustering         = ENABLE_CLUSTERING,
index 39412c9097c6a240466c51c941ec890a4612542e..c1a2d747b24686cb835541c68bb2f20f2304d009 100644 (file)
@@ -385,8 +385,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
                dspi->cur_chip = spi_get_ctldata(spi);
                dspi->cs = spi->chip_select;
                dspi->cs_change = 0;
-               if (dspi->cur_transfer->transfer_list.next
-                               == &dspi->cur_msg->transfers)
+               if (list_is_last(&dspi->cur_transfer->transfer_list,
+                                &dspi->cur_msg->transfers) || transfer->cs_change)
                        dspi->cs_change = 1;
                dspi->void_write_data = dspi->cur_chip->void_write_data;
 
index 43a02e377b3b168339013d797f8da7c150959c58..0caa3c8bef46c46e0ed66bf89f518cc5c5236449 100644 (file)
@@ -423,12 +423,16 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
 
        if (mcspi_dma->dma_tx) {
                struct dma_async_tx_descriptor *tx;
+               struct scatterlist sg;
 
                dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
 
-               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
-                                            xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               sg_init_table(&sg, 1);
+               sg_dma_address(&sg) = xfer->tx_dma;
+               sg_dma_len(&sg) = xfer->len;
+
+               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_tx_callback;
                        tx->callback_param = spi;
@@ -474,15 +478,20 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 
        if (mcspi_dma->dma_rx) {
                struct dma_async_tx_descriptor *tx;
+               struct scatterlist sg;
 
                dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
 
                if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
                        dma_count -= es;
 
-               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
-                                            xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               sg_init_table(&sg, 1);
+               sg_dma_address(&sg) = xfer->rx_dma;
+               sg_dma_len(&sg) = dma_count;
+
+               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+                               DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
+                               DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_rx_callback;
                        tx->callback_param = spi;
@@ -496,6 +505,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
        omap2_mcspi_set_dma_req(spi, 1, 1);
 
        wait_for_completion(&mcspi_dma->dma_rx_completion);
+       dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
+                        DMA_FROM_DEVICE);
 
        if (mcspi->fifo_depth > 0)
                return count;
@@ -608,6 +619,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 
        if (tx != NULL) {
                wait_for_completion(&mcspi_dma->dma_tx_completion);
+               dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
+                                DMA_TO_DEVICE);
 
                if (mcspi->fifo_depth > 0) {
                        irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1074,16 +1087,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                gpio_free(spi->cs_gpio);
 }
 
-static bool omap2_mcspi_can_dma(struct spi_master *master,
-                               struct spi_device *spi,
-                               struct spi_transfer *xfer)
-{
-       if (xfer->len < DMA_MIN_BYTES)
-               return false;
-
-       return true;
-}
-
 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
                struct spi_device *spi, struct spi_transfer *t)
 {
@@ -1265,6 +1268,32 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
                return -EINVAL;
        }
 
+       if (len < DMA_MIN_BYTES)
+               goto skip_dma_map;
+
+       if (mcspi_dma->dma_tx && tx_buf != NULL) {
+               t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
+                               len, DMA_TO_DEVICE);
+               if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
+                       dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+                                       'T', len);
+                       return -EINVAL;
+               }
+       }
+       if (mcspi_dma->dma_rx && rx_buf != NULL) {
+               t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
+                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
+                       dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+                                       'R', len);
+                       if (tx_buf != NULL)
+                               dma_unmap_single(mcspi->dev, t->tx_dma,
+                                               len, DMA_TO_DEVICE);
+                       return -EINVAL;
+               }
+       }
+
+skip_dma_map:
        return omap2_mcspi_work_one(mcspi, spi, t);
 }
 
@@ -1348,7 +1377,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
        master->transfer_one = omap2_mcspi_transfer_one;
        master->set_cs = omap2_mcspi_set_cs;
        master->cleanup = omap2_mcspi_cleanup;
-       master->can_dma = omap2_mcspi_can_dma;
        master->dev.of_node = node;
        master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
        master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
index 85e59a406a4c01fabef55679c5ffb9b9583bd852..86138e4101b07f990634d06af60b4c5a63ce4607 100644 (file)
@@ -126,7 +126,7 @@ static const struct lpss_config lpss_platforms[] = {
                .reg_general = -1,
                .reg_ssp = 0x20,
                .reg_cs_ctrl = 0x24,
-               .reg_capabilities = 0xfc,
+               .reg_capabilities = -1,
                .rx_threshold = 1,
                .tx_threshold_lo = 32,
                .tx_threshold_hi = 56,
index eac3c960b2decb8c4aa73f73894b16ac3021beba..443f664534e144fd388e2baba7f4d9bb49134772 100644 (file)
@@ -94,6 +94,7 @@ struct ti_qspi {
 #define QSPI_FLEN(n)                   ((n - 1) << 0)
 #define QSPI_WLEN_MAX_BITS             128
 #define QSPI_WLEN_MAX_BYTES            16
+#define QSPI_WLEN_MASK                 QSPI_WLEN(QSPI_WLEN_MAX_BITS)
 
 /* STATUS REGISTER */
 #define BUSY                           0x01
@@ -235,16 +236,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
        return  -ETIMEDOUT;
 }
 
-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+                         int count)
 {
-       int wlen, count, xfer_len;
+       int wlen, xfer_len;
        unsigned int cmd;
        const u8 *txbuf;
        u32 data;
 
        txbuf = t->tx_buf;
        cmd = qspi->cmd | QSPI_WR_SNGL;
-       count = t->len;
        wlen = t->bits_per_word >> 3;   /* in bytes */
        xfer_len = wlen;
 
@@ -304,9 +305,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        return 0;
 }
 
-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+                        int count)
 {
-       int wlen, count;
+       int wlen;
        unsigned int cmd;
        u8 *rxbuf;
 
@@ -323,7 +325,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
                cmd |= QSPI_RD_SNGL;
                break;
        }
-       count = t->len;
        wlen = t->bits_per_word >> 3;   /* in bytes */
 
        while (count) {
@@ -354,12 +355,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        return 0;
 }
 
-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+                            int count)
 {
        int ret;
 
        if (t->tx_buf) {
-               ret = qspi_write_msg(qspi, t);
+               ret = qspi_write_msg(qspi, t, count);
                if (ret) {
                        dev_dbg(qspi->dev, "Error while writing\n");
                        return ret;
@@ -367,7 +369,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        }
 
        if (t->rx_buf) {
-               ret = qspi_read_msg(qspi, t);
+               ret = qspi_read_msg(qspi, t, count);
                if (ret) {
                        dev_dbg(qspi->dev, "Error while reading\n");
                        return ret;
@@ -450,7 +452,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
        struct spi_device *spi = m->spi;
        struct spi_transfer *t;
        int status = 0, ret;
-       int frame_length;
+       unsigned int frame_len_words, transfer_len_words;
+       int wlen;
 
        /* setup device control reg */
        qspi->dc = 0;
@@ -462,14 +465,15 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
        if (spi->mode & SPI_CS_HIGH)
                qspi->dc |= QSPI_CSPOL(spi->chip_select);
 
-       frame_length = (m->frame_length << 3) / spi->bits_per_word;
-
-       frame_length = clamp(frame_length, 0, QSPI_FRAME);
+       frame_len_words = 0;
+       list_for_each_entry(t, &m->transfers, transfer_list)
+               frame_len_words += t->len / (t->bits_per_word >> 3);
+       frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
 
        /* setup command reg */
        qspi->cmd = 0;
        qspi->cmd |= QSPI_EN_CS(spi->chip_select);
-       qspi->cmd |= QSPI_FLEN(frame_length);
+       qspi->cmd |= QSPI_FLEN(frame_len_words);
 
        ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
 
@@ -479,16 +483,23 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
                ti_qspi_disable_memory_map(spi);
 
        list_for_each_entry(t, &m->transfers, transfer_list) {
-               qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+               qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
+                            QSPI_WLEN(t->bits_per_word));
+
+               wlen = t->bits_per_word >> 3;
+               transfer_len_words = min(t->len / wlen, frame_len_words);
 
-               ret = qspi_transfer_msg(qspi, t);
+               ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
                if (ret) {
                        dev_dbg(qspi->dev, "transfer message failed\n");
                        mutex_unlock(&qspi->list_lock);
                        return -EINVAL;
                }
 
-               m->actual_length += t->len;
+               m->actual_length += transfer_len_words * wlen;
+               frame_len_words -= transfer_len_words;
+               if (frame_len_words == 0)
+                       break;
        }
 
        mutex_unlock(&qspi->list_lock);
index feef8a9c4de7cf09bcbc0effb45f83c207c9015b..f02404052b7b66c73809f8bc33c93f07acab4a9b 100644 (file)
@@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
                .sb = inode->i_sb,
        };
        lower_file = ecryptfs_file_to_lower(file);
-       lower_file->f_pos = ctx->pos;
        rc = iterate_dir(lower_file, &buf.ctx);
        ctx->pos = buf.ctx.pos;
        if (rc < 0)
@@ -223,14 +222,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
        }
        ecryptfs_set_file_lower(
                file, ecryptfs_inode_to_private(inode)->lower_file);
-       if (d_is_dir(ecryptfs_dentry)) {
-               ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
-               mutex_lock(&crypt_stat->cs_mutex);
-               crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-               mutex_unlock(&crypt_stat->cs_mutex);
-               rc = 0;
-               goto out;
-       }
        rc = read_or_initialize_metadata(ecryptfs_dentry);
        if (rc)
                goto out_put;
@@ -247,6 +238,45 @@ out:
        return rc;
 }
 
+/**
+ * ecryptfs_dir_open
+ * @inode: inode speciying file to open
+ * @file: Structure to return filled in
+ *
+ * Opens the file specified by inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_dir_open(struct inode *inode, struct file *file)
+{
+       struct dentry *ecryptfs_dentry = file->f_path.dentry;
+       /* Private value of ecryptfs_dentry allocated in
+        * ecryptfs_lookup() */
+       struct ecryptfs_file_info *file_info;
+       struct file *lower_file;
+
+       /* Released in ecryptfs_release or end of function if failure */
+       file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+       ecryptfs_set_file_private(file, file_info);
+       if (unlikely(!file_info)) {
+               ecryptfs_printk(KERN_ERR,
+                               "Error attempting to allocate memory\n");
+               return -ENOMEM;
+       }
+       lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
+                                file->f_flags, current_cred());
+       if (IS_ERR(lower_file)) {
+               printk(KERN_ERR "%s: Error attempting to initialize "
+                       "the lower file for the dentry with name "
+                       "[%pd]; rc = [%ld]\n", __func__,
+                       ecryptfs_dentry, PTR_ERR(lower_file));
+               kmem_cache_free(ecryptfs_file_info_cache, file_info);
+               return PTR_ERR(lower_file);
+       }
+       ecryptfs_set_file_lower(file, lower_file);
+       return 0;
+}
+
 static int ecryptfs_flush(struct file *file, fl_owner_t td)
 {
        struct file *lower_file = ecryptfs_file_to_lower(file);
@@ -267,6 +297,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int ecryptfs_dir_release(struct inode *inode, struct file *file)
+{
+       fput(ecryptfs_file_to_lower(file));
+       kmem_cache_free(ecryptfs_file_info_cache,
+                       ecryptfs_file_to_private(file));
+       return 0;
+}
+
+static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+       return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
+}
+
 static int
 ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
@@ -346,20 +389,16 @@ const struct file_operations ecryptfs_dir_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .open = ecryptfs_open,
-       .flush = ecryptfs_flush,
-       .release = ecryptfs_release,
+       .open = ecryptfs_dir_open,
+       .release = ecryptfs_dir_release,
        .fsync = ecryptfs_fsync,
-       .fasync = ecryptfs_fasync,
-       .splice_read = generic_file_splice_read,
-       .llseek = default_llseek,
+       .llseek = ecryptfs_dir_llseek,
 };
 
 const struct file_operations ecryptfs_main_fops = {
        .llseek = generic_file_llseek,
        .read_iter = ecryptfs_read_update_atime,
        .write_iter = generic_file_write_iter,
-       .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
index 5384ceb35b1cc829442d6aa440db19c553a6eed8..98b3eb7d8eaf64d5eb006801aeb130ff8503f593 100644 (file)
@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
        int retnamlen = 0;
        int truncate = 0;
        int ret = 0;
+       char *p;
+       int len;
 
        if (!ISOFS_SB(inode->i_sb)->s_rock)
                return 0;
@@ -267,12 +269,17 @@ repeat:
                                        rr->u.NM.flags);
                                break;
                        }
-                       if ((strlen(retname) + rr->len - 5) >= 254) {
+                       len = rr->len - 5;
+                       if (retnamlen + len >= 254) {
                                truncate = 1;
                                break;
                        }
-                       strncat(retname, rr->u.NM.name, rr->len - 5);
-                       retnamlen += rr->len - 5;
+                       p = memchr(rr->u.NM.name, '\0', len);
+                       if (unlikely(p))
+                               len = p - rr->u.NM.name;
+                       memcpy(retname + retnamlen, rr->u.NM.name, len);
+                       retnamlen += len;
+                       retname[retnamlen] = '\0';
                        break;
                case SIG('R', 'E'):
                        kfree(rs.buffer);
index 03b688d19f6964010c27c16759520315892c780d..37f9678ae4df5c191e1870de88beff21395d4839 100644 (file)
@@ -153,9 +153,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
        p = buf + len + nlen;
        *p = '\0';
        for (kn = kn_to; kn != common; kn = kn->parent) {
-               nlen = strlen(kn->name);
-               p -= nlen;
-               memcpy(p, kn->name, nlen);
+               size_t tmp = strlen(kn->name);
+               p -= tmp;
+               memcpy(p, kn->name, tmp);
                *(--p) = '/';
        }
 
index f73541fbe7afadaee17dcb038f7be18c21b4d18f..3d670a3678f2dd6dc24dc9db23ca9cd5a63ab0ed 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/namei.h>
+#include <linux/seq_file.h>
 
 #include "kernfs-internal.h"
 
@@ -40,6 +41,19 @@ static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
        return 0;
 }
 
+static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
+{
+       struct kernfs_node *node = dentry->d_fsdata;
+       struct kernfs_root *root = kernfs_root(node);
+       struct kernfs_syscall_ops *scops = root->syscall_ops;
+
+       if (scops && scops->show_path)
+               return scops->show_path(sf, node, root);
+
+       seq_dentry(sf, dentry, " \t\n\\");
+       return 0;
+}
+
 const struct super_operations kernfs_sops = {
        .statfs         = simple_statfs,
        .drop_inode     = generic_delete_inode,
@@ -47,6 +61,7 @@ const struct super_operations kernfs_sops = {
 
        .remount_fs     = kernfs_sop_remount_fs,
        .show_options   = kernfs_sop_show_options,
+       .show_path      = kernfs_sop_show_path,
 };
 
 /**
index 1d9ca2d5dff68ee184bf5dbe6d0d417f1b63f245..30145f8f21ed5b47e47fba7e29e0434c76e858d0 100644 (file)
@@ -2266,6 +2266,33 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
 }
 EXPORT_SYMBOL(vfs_path_lookup);
 
+/**
+ * lookup_hash - lookup single pathname component on already hashed name
+ * @name:      name and hash to lookup
+ * @base:      base directory to lookup from
+ *
+ * The name must have been verified and hashed (see lookup_one_len()).  Using
+ * this after just full_name_hash() is unsafe.
+ *
+ * This function also doesn't check for search permission on base directory.
+ *
+ * Use lookup_one_len_unlocked() instead, unless you really know what you are
+ * doing.
+ *
+ * Do not hold i_mutex; this helper takes i_mutex if necessary.
+ */
+struct dentry *lookup_hash(const struct qstr *name, struct dentry *base)
+{
+       struct dentry *ret;
+
+       ret = lookup_dcache(name, base, 0);
+       if (!ret)
+               ret = lookup_slow(name, base, 0);
+
+       return ret;
+}
+EXPORT_SYMBOL(lookup_hash);
+
 /**
  * lookup_one_len - filesystem helper to lookup single pathname component
  * @name:      pathname component to lookup
@@ -2337,7 +2364,6 @@ struct dentry *lookup_one_len_unlocked(const char *name,
        struct qstr this;
        unsigned int c;
        int err;
-       struct dentry *ret;
 
        this.name = name;
        this.len = len;
@@ -2369,10 +2395,7 @@ struct dentry *lookup_one_len_unlocked(const char *name,
        if (err)
                return ERR_PTR(err);
 
-       ret = lookup_dcache(&this, base, 0);
-       if (!ret)
-               ret = lookup_slow(&this, base, 0);
-       return ret;
+       return lookup_hash(&this, base);
 }
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
@@ -2942,22 +2965,10 @@ no_open:
                dentry = lookup_real(dir, dentry, nd->flags);
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
-
-               if (create_error) {
-                       int open_flag = op->open_flag;
-
-                       error = create_error;
-                       if ((open_flag & O_EXCL)) {
-                               if (!dentry->d_inode)
-                                       goto out;
-                       } else if (!dentry->d_inode) {
-                               goto out;
-                       } else if ((open_flag & O_TRUNC) &&
-                                  d_is_reg(dentry)) {
-                               goto out;
-                       }
-                       /* will fail later, go on to get the right error */
-               }
+       }
+       if (create_error && !dentry->d_inode) {
+               error = create_error;
+               goto out;
        }
 looked_up:
        path->dentry = dentry;
@@ -4213,7 +4224,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        bool new_is_dir = false;
        unsigned max_links = new_dir->i_sb->s_max_links;
 
-       if (source == target)
+       /*
+        * Check source == target.
+        * On overlayfs need to look at underlying inodes.
+        */
+       if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
                return 0;
 
        error = may_delete(old_dir, old_dentry, is_dir);
index 0cdf497c91efbb915512aceed2bf58acaa37fa1d..2162434728c022ab4651904b778c21d958ca802d 100644 (file)
@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
        brelse(di_bh);
        return acl;
 }
+
+int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct posix_acl *acl;
+       int ret;
+
+       if (S_ISLNK(inode->i_mode))
+               return -EOPNOTSUPP;
+
+       if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
+               return 0;
+
+       acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+       if (IS_ERR(acl) || !acl)
+               return PTR_ERR(acl);
+       ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+       if (ret)
+               return ret;
+       ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
+                           acl, NULL, NULL);
+       posix_acl_release(acl);
+       return ret;
+}
+
+/*
+ * Initialize the ACLs of a new inode. If parent directory has default ACL,
+ * then clone to new inode. Called from ocfs2_mknod.
+ */
+int ocfs2_init_acl(handle_t *handle,
+                  struct inode *inode,
+                  struct inode *dir,
+                  struct buffer_head *di_bh,
+                  struct buffer_head *dir_bh,
+                  struct ocfs2_alloc_context *meta_ac,
+                  struct ocfs2_alloc_context *data_ac)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct posix_acl *acl = NULL;
+       int ret = 0, ret2;
+       umode_t mode;
+
+       if (!S_ISLNK(inode->i_mode)) {
+               if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+                       acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
+                                                  dir_bh);
+                       if (IS_ERR(acl))
+                               return PTR_ERR(acl);
+               }
+               if (!acl) {
+                       mode = inode->i_mode & ~current_umask();
+                       ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+                       if (ret) {
+                               mlog_errno(ret);
+                               goto cleanup;
+                       }
+               }
+       }
+       if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
+               if (S_ISDIR(inode->i_mode)) {
+                       ret = ocfs2_set_acl(handle, inode, di_bh,
+                                           ACL_TYPE_DEFAULT, acl,
+                                           meta_ac, data_ac);
+                       if (ret)
+                               goto cleanup;
+               }
+               mode = inode->i_mode;
+               ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
+               if (ret < 0)
+                       return ret;
+
+               ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+               if (ret2) {
+                       mlog_errno(ret2);
+                       ret = ret2;
+                       goto cleanup;
+               }
+               if (ret > 0) {
+                       ret = ocfs2_set_acl(handle, inode,
+                                           di_bh, ACL_TYPE_ACCESS,
+                                           acl, meta_ac, data_ac);
+               }
+       }
+cleanup:
+       posix_acl_release(acl);
+       return ret;
+}
index 3fce68d086251a6e26ea9805361e3a1ccb351d46..2783a75b3999e3c6a548bc44c4645ac882048ae6 100644 (file)
@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
                         struct posix_acl *acl,
                         struct ocfs2_alloc_context *meta_ac,
                         struct ocfs2_alloc_context *data_ac);
+extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
+extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
+                         struct buffer_head *, struct buffer_head *,
+                         struct ocfs2_alloc_context *,
+                         struct ocfs2_alloc_context *);
 
 #endif /* OCFS2_ACL_H */
index 5308841756be24f6e682e361a35f1f7db00f9568..59cce53c91d810e208ef92896e7e2a9ae5806a27 100644 (file)
@@ -1268,20 +1268,20 @@ bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
 bail:
-       brelse(bh);
 
        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
                dqput(transfer_to[qtype]);
 
        if (!status && attr->ia_valid & ATTR_MODE) {
-               status = posix_acl_chmod(inode, inode->i_mode);
+               status = ocfs2_acl_chmod(inode, bh);
                if (status < 0)
                        mlog_errno(status);
        }
        if (inode_locked)
                ocfs2_inode_unlock(inode, 1);
 
+       brelse(bh);
        return status;
 }
 
index 6b3e87189a6467fd3c72533db5c52d149ba2064d..a8f1225e6d9b767f5551bca0699f8e5e8f045e4c 100644 (file)
@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
        struct ocfs2_dir_lookup_result lookup = { NULL, };
        sigset_t oldset;
        int did_block_signals = 0;
-       struct posix_acl *default_acl = NULL, *acl = NULL;
        struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
                goto leave;
        }
 
-       status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
-       if (status) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
                                                            S_ISDIR(mode),
                                                            xattr_credits));
@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
                inc_nlink(dir);
        }
 
-       if (default_acl) {
-               status = ocfs2_set_acl(handle, inode, new_fe_bh,
-                                      ACL_TYPE_DEFAULT, default_acl,
-                                      meta_ac, data_ac);
-       }
-       if (!status && acl) {
-               status = ocfs2_set_acl(handle, inode, new_fe_bh,
-                                      ACL_TYPE_ACCESS, acl,
-                                      meta_ac, data_ac);
-       }
+       status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
+                        meta_ac, data_ac);
 
        if (status < 0) {
                mlog_errno(status);
@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
        d_instantiate(dentry, inode);
        status = 0;
 leave:
-       if (default_acl)
-               posix_acl_release(default_acl);
-       if (acl)
-               posix_acl_release(acl);
        if (status < 0 && did_quota_inode)
                dquot_free_inode(inode);
        if (handle)
index 744d5d90c363a182812e2c7de7ecf4971452ce3c..92bbe93bfe1077332286d5a16fb5ea759c66c466 100644 (file)
@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        struct inode *inode = d_inode(old_dentry);
        struct buffer_head *old_bh = NULL;
        struct inode *new_orphan_inode = NULL;
-       struct posix_acl *default_acl, *acl;
-       umode_t mode;
 
        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
                return -EOPNOTSUPP;
 
-       mode = inode->i_mode;
-       error = posix_acl_create(dir, &mode, &default_acl, &acl);
-       if (error) {
-               mlog_errno(error);
-               return error;
-       }
 
-       error = ocfs2_create_inode_in_orphan(dir, mode,
+       error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
                                             &new_orphan_inode);
        if (error) {
                mlog_errno(error);
@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        /* If the security isn't preserved, we need to re-initialize them. */
        if (!preserve) {
                error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
-                                                   &new_dentry->d_name,
-                                                   default_acl, acl);
+                                                   &new_dentry->d_name);
                if (error)
                        mlog_errno(error);
        }
 out:
-       if (default_acl)
-               posix_acl_release(default_acl);
-       if (acl)
-               posix_acl_release(acl);
        if (!error) {
                error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
                                                       new_dentry);
index 7d3d979f57d9142169f93f2c88b27a612dc06dc4..f19b7381a9984a6f85194c503f8f22c591048df9 100644 (file)
@@ -7216,12 +7216,10 @@ out:
  */
 int ocfs2_init_security_and_acl(struct inode *dir,
                                struct inode *inode,
-                               const struct qstr *qstr,
-                               struct posix_acl *default_acl,
-                               struct posix_acl *acl)
+                               const struct qstr *qstr)
 {
-       struct buffer_head *dir_bh = NULL;
        int ret = 0;
+       struct buffer_head *dir_bh = NULL;
 
        ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
        if (ret) {
@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
                mlog_errno(ret);
                goto leave;
        }
-
-       if (!ret && default_acl)
-               ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
-       if (!ret && acl)
-               ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
+       ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+       if (ret)
+               mlog_errno(ret);
 
        ocfs2_inode_unlock(dir, 0);
        brelse(dir_bh);
index f10d5b93c366c8a7d12ddc1c90766ea88ed3dc56..1633cc15ea1fdf75c7a507d8f5f60b921bf902e6 100644 (file)
@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
                         bool preserve_security);
 int ocfs2_init_security_and_acl(struct inode *dir,
                                struct inode *inode,
-                               const struct qstr *qstr,
-                               struct posix_acl *default_acl,
-                               struct posix_acl *acl);
+                               const struct qstr *qstr);
 #endif /* OCFS2_XATTR_H */
index 17cb6b1dab753b9de6366f92b6ca3ba3cb204291..081d3d6df74ba54ccd0e1d754640b218e54f4111 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
 int vfs_open(const struct path *path, struct file *file,
             const struct cred *cred)
 {
-       struct dentry *dentry = path->dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
 
-       file->f_path = *path;
-       if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
-               inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
-               if (IS_ERR(inode))
-                       return PTR_ERR(inode);
-       }
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
+       file->f_path = *path;
        return do_dentry_open(file, inode, NULL, cred);
 }
 
index 5d972e6cd3fe97fcae5c782b59c72c0007668566..791235e03d1712ed92b62bc405483dd2e1ab972b 100644 (file)
@@ -411,9 +411,7 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
 {
        struct dentry *dentry;
 
-       inode_lock(dir->d_inode);
-       dentry = lookup_one_len(name->name, dir, name->len);
-       inode_unlock(dir->d_inode);
+       dentry = lookup_hash(name, dir);
 
        if (IS_ERR(dentry)) {
                if (PTR_ERR(dentry) == -ENOENT)
index 92e37e224cd22d66d0b9675f1790e110d5e7b415..0d163a84082dbddc846a871a46cfd75aa1eddfb8 100644 (file)
@@ -434,7 +434,7 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
                        && !lookup_symbol_name(wchan, symname))
                seq_printf(m, "%s", symname);
        else
-               seq_puts(m, "0\n");
+               seq_putc(m, '0');
 
        return 0;
 }
index b018eb485019b1ed1bed2ccdc6c7b9914c575963..dd9bf7e410d2975f212accdf827df8a47c2d4c94 100644 (file)
@@ -1143,6 +1143,9 @@ static long do_splice_to(struct file *in, loff_t *ppos,
        if (unlikely(ret < 0))
                return ret;
 
+       if (unlikely(len > MAX_RW_COUNT))
+               len = MAX_RW_COUNT;
+
        if (in->f_op->splice_read)
                splice_read = in->f_op->splice_read;
        else
index 4bb4de8d95ea5869000db22baa662451c6acd1a0..7e9422cb5989c2e84ddd19eba2c91041f7b8622f 100644 (file)
@@ -565,4 +565,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
                return dentry;
 }
 
+static inline struct inode *vfs_select_inode(struct dentry *dentry,
+                                            unsigned open_flags)
+{
+       struct inode *inode = d_inode(dentry);
+
+       if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+               inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+       return inode;
+}
+
+
 #endif /* __LINUX_DCACHE_H */
index ec1411c891056daa6f372f5fabc622c9cdb22a52..6fc31ef1da2d8e9efe5d91b8e4349ed4fe0dd809 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/printk.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/capability.h>
+
 #include <net/sch_generic.h>
 
 #include <asm/cacheflush.h>
@@ -42,6 +44,15 @@ struct bpf_prog_aux;
 #define BPF_REG_X      BPF_REG_7
 #define BPF_REG_TMP    BPF_REG_8
 
+/* Kernel hidden auxiliary/helper register for hardening step.
+ * Only used by eBPF JITs. It's nothing more than a temporary
+ * register that JITs use internally, only that here it's part
+ * of eBPF instructions that have been rewritten for blinding
+ * constants. See JIT pre-step in bpf_jit_blind_constants().
+ */
+#define BPF_REG_AX             MAX_BPF_REG
+#define MAX_BPF_JIT_REG                (MAX_BPF_REG + 1)
+
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK  512
 
@@ -458,7 +469,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
@@ -492,10 +503,17 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_skb_data(void *func);
 
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+                                      const struct bpf_insn *patch, u32 len);
+
 #ifdef CONFIG_BPF_JIT
+extern int bpf_jit_enable;
+extern int bpf_jit_harden;
+
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
 struct bpf_binary_header *
@@ -507,6 +525,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 void bpf_jit_compile(struct bpf_prog *fp);
 void bpf_jit_free(struct bpf_prog *fp);
 
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
+
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
 {
@@ -517,6 +538,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
 }
+
+static inline bool bpf_jit_is_ebpf(void)
+{
+# ifdef CONFIG_HAVE_EBPF_JIT
+       return true;
+# else
+       return false;
+# endif
+}
+
+static inline bool bpf_jit_blinding_enabled(void)
+{
+       /* These are the prerequisites, should someone ever have the
+        * idea to call blinding outside of them, we make sure to
+        * bail out.
+        */
+       if (!bpf_jit_is_ebpf())
+               return false;
+       if (!bpf_jit_enable)
+               return false;
+       if (!bpf_jit_harden)
+               return false;
+       if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+               return false;
+
+       return true;
+}
 #else
 static inline void bpf_jit_compile(struct bpf_prog *fp)
 {
index c06c44242f3993ae05a8454f844425a0bd14d525..30f089ebe0a4540a91053f406526f323bd1d24f0 100644 (file)
@@ -152,6 +152,8 @@ struct kernfs_syscall_ops {
        int (*rmdir)(struct kernfs_node *kn);
        int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
                      const char *new_name);
+       int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
+                        struct kernfs_root *root);
 };
 
 struct kernfs_root {
index b288965e8101dc38f8a4cf4f13f55c0ba2dfbb7a..2c14eeca46f039d78c1e82f4ccd3053e4da15786 100644 (file)
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
 
 #define S2MPS11_LDO_VSEL_MASK  0x3F
 #define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_BUCK9_VSEL_MASK        0x1F
 #define S2MPS11_ENABLE_MASK    (0x03 << S2MPS11_ENABLE_SHIFT)
 #define S2MPS11_ENABLE_SHIFT   0x06
 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
 #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
 #define S2MPS11_RAMP_DELAY     25000           /* uV/us */
 
 #define S2MPS11_CTRL1_PWRHOLD_MASK     BIT(4)
index 9613143f0561e26297f093134259b4dca6646f75..07b504f7eb8479f295cb48393a0a0104c778151a 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
+#include <linux/workqueue.h>
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
@@ -457,6 +458,17 @@ struct mlx5_irq_info {
        char name[MLX5_MAX_IRQ_NAME];
 };
 
+struct mlx5_fc_stats {
+       struct list_head list;
+       struct list_head addlist;
+       /* protect addlist add/splice operations */
+       spinlock_t addlist_lock;
+
+       struct workqueue_struct *wq;
+       struct delayed_work work;
+       unsigned long next_query;
+};
+
 struct mlx5_eswitch;
 
 struct mlx5_priv {
@@ -520,6 +532,8 @@ struct mlx5_priv {
        struct mlx5_flow_root_namespace *fdb_root_ns;
        struct mlx5_flow_root_namespace *esw_egress_root_ns;
        struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+
+       struct mlx5_fc_stats            fc_stats;
 };
 
 enum mlx5_device_state {
index 6467569ad76edb90403feca18587158fb7b1a0c1..4b7a107d9c19a8e14121177d887bf53c2d49a15f 100644 (file)
@@ -73,6 +73,7 @@ struct mlx5_flow_destination {
                u32                     tir_num;
                struct mlx5_flow_table  *ft;
                u32                     vport_num;
+               struct mlx5_fc          *counter;
        };
 };
 
@@ -125,4 +126,10 @@ void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
 int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
                                 struct mlx5_flow_destination *dest);
 
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+                         u64 *bytes, u64 *packets, u64 *lastuse);
+
 #endif
index 4ce4ea422a105ca2acbd5d0fba0499bd64f29b2b..9a05cd7e5890b43400c65f6c903d5d815daffa28 100644 (file)
@@ -202,6 +202,9 @@ enum {
        MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
        MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
        MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938,
+       MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
+       MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
+       MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
        MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c
 };
 
@@ -265,7 +268,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
 
 struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         ft_support[0x1];
-       u8         reserved_at_1[0x2];
+       u8         reserved_at_1[0x1];
+       u8         flow_counter[0x1];
        u8         flow_modify_en[0x1];
        u8         modify_root[0x1];
        u8         identified_miss_table_mode[0x1];
@@ -932,6 +936,8 @@ enum mlx5_flow_destination_type {
        MLX5_FLOW_DESTINATION_TYPE_VPORT        = 0x0,
        MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE   = 0x1,
        MLX5_FLOW_DESTINATION_TYPE_TIR          = 0x2,
+
+       MLX5_FLOW_DESTINATION_TYPE_COUNTER      = 0x100,
 };
 
 struct mlx5_ifc_dest_format_struct_bits {
@@ -941,6 +947,19 @@ struct mlx5_ifc_dest_format_struct_bits {
        u8         reserved_at_20[0x20];
 };
 
+struct mlx5_ifc_flow_counter_list_bits {
+       u8         reserved_at_0[0x10];
+       u8         flow_counter_id[0x10];
+
+       u8         reserved_at_20[0x20];
+};
+
+union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+       struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+       struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
+       u8         reserved_at_0[0x40];
+};
+
 struct mlx5_ifc_fte_match_param_bits {
        struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
 
@@ -2006,6 +2025,7 @@ enum {
        MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
        MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
+       MLX5_FLOW_CONTEXT_ACTION_COUNT     = 0x8,
 };
 
 struct mlx5_ifc_flow_context_bits {
@@ -2022,13 +2042,16 @@ struct mlx5_ifc_flow_context_bits {
        u8         reserved_at_80[0x8];
        u8         destination_list_size[0x18];
 
-       u8         reserved_at_a0[0x160];
+       u8         reserved_at_a0[0x8];
+       u8         flow_counter_list_size[0x18];
+
+       u8         reserved_at_c0[0x140];
 
        struct mlx5_ifc_fte_match_param_bits match_value;
 
        u8         reserved_at_1200[0x600];
 
-       struct mlx5_ifc_dest_format_struct_bits destination[0];
+       union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
 };
 
 enum {
@@ -3937,6 +3960,34 @@ struct mlx5_ifc_query_flow_group_in_bits {
        u8         reserved_at_e0[0x120];
 };
 
+struct mlx5_ifc_query_flow_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x40];
+
+       struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_at_40[0x80];
+
+       u8         clear[0x1];
+       u8         reserved_at_c1[0xf];
+       u8         num_of_counters[0x10];
+
+       u8         reserved_at_e0[0x10];
+       u8         flow_counter_id[0x10];
+};
+
 struct mlx5_ifc_query_esw_vport_context_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
@@ -5510,6 +5561,28 @@ struct mlx5_ifc_dealloc_pd_in_bits {
        u8         reserved_at_60[0x20];
 };
 
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_at_40[0x10];
+       u8         flow_counter_id[0x10];
+
+       u8         reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_create_xrc_srq_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
@@ -6237,6 +6310,28 @@ struct mlx5_ifc_alloc_pd_in_bits {
        u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x10];
+       u8         flow_counter_id[0x10];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
index 864d7221de846e44e600245eaac773ab7116bcca..8f468e0d2534eddaa178fdf252e7cf4e53f3330b 100644 (file)
@@ -500,11 +500,20 @@ static inline int page_mapcount(struct page *page)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int total_mapcount(struct page *page);
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
 #else
 static inline int total_mapcount(struct page *page)
 {
        return page_mapcount(page);
 }
+static inline int page_trans_huge_mapcount(struct page *page,
+                                          int *total_mapcount)
+{
+       int mapcount = page_mapcount(page);
+       if (total_mapcount)
+               *total_mapcount = mapcount;
+       return mapcount;
+}
 #endif
 
 static inline struct page *virt_to_head_page(const void *x)
index 77d01700daf7b826bdf19c2022d5260adb640e0b..ec5ec2818a288d52158098e829fb57d68e4ec87b 100644 (file)
@@ -79,6 +79,8 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+struct qstr;
+extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
 
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);
index c2f5112f08f703111c56d9bc48e58ef000ff3a4b..c148edfe49659b2590bb8ff08c1ac286449cb74f 100644 (file)
@@ -3759,7 +3759,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 extern int             netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
-extern int             bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
index 0a4cd4703f403f0d65f867aaa6dcc3e128fcde05..ad220359f1b072cfbf29456871312bbf07d5cceb 100644 (file)
@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
 extern int page_swapcount(struct page *);
 extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
-extern int reuse_swap_page(struct page *);
+extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
        return 0;
 }
 
-#define reuse_swap_page(page) \
-       (!PageTransCompound(page) && page_mapcount(page) == 1)
+#define reuse_swap_page(page, total_mapcount) \
+       (page_trans_huge_mapcount(page, total_mapcount) == 1)
 
 static inline int try_to_free_swap(struct page *page)
 {
index fd9bcfedad42d70c4525fd30178756fb43ccf581..1b5d1cd796e2b753bdd1ff586427a35c033a3ae9 100644 (file)
@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
 void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
index 03e322b30218244b9bde7fca5438cacc20923307..2cd9e9bb059a7a9d34fbc0a22e4ca6df0b2a3191 100644 (file)
@@ -106,6 +106,7 @@ struct tc_action_ops {
                        int bind);
        int     (*walk)(struct net *, struct sk_buff *,
                        struct netlink_callback *, int, struct tc_action *);
+       void    (*stats_update)(struct tc_action *, u64, u32, u64);
 };
 
 struct tc_action_net {
@@ -178,10 +179,21 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
 #define tc_for_each_action(_a, _exts) \
        list_for_each_entry(a, &(_exts)->actions, list)
+
+static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
+                                          u64 packets, u64 lastuse)
+{
+       if (!a->ops->stats_update)
+               return;
+
+       a->ops->stats_update(a, bytes, packets, lastuse);
+}
+
 #else /* CONFIG_NET_CLS_ACT */
 
 #define tc_no_actions(_exts) true
 #define tc_for_each_action(_a, _exts) while (0)
+#define tcf_action_stats_update(a, bytes, packets, lastuse)
 
 #endif /* CONFIG_NET_CLS_ACT */
 #endif
index e589cb3dcceede6d4bc6e33bdc5b30830a06ba4d..254a0fc018006a773f31f41bc07e89e5b81528c2 100644 (file)
@@ -98,7 +98,8 @@
  *   nla_put_u8(skb, type, value)      add u8 attribute to skb
  *   nla_put_u16(skb, type, value)     add u16 attribute to skb
  *   nla_put_u32(skb, type, value)     add u32 attribute to skb
- *   nla_put_u64(skb, type, value)     add u64 attribute to skb
+ *   nla_put_u64_64bits(skb, type,
+ *                     value, padattr) add u64 attribute to skb
  *   nla_put_s8(skb, type, value)      add s8 attribute to skb
  *   nla_put_s16(skb, type, value)     add s16 attribute to skb
  *   nla_put_s32(skb, type, value)     add s32 attribute to skb
@@ -846,17 +847,6 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
        return nla_put(skb, attrtype, sizeof(__le32), &value);
 }
 
-/**
- * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
- * @skb: socket buffer to add attribute to
- * @attrtype: attribute type
- * @value: numeric value
- */
-static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
-{
-       return nla_put(skb, attrtype, sizeof(u64), &value);
-}
-
 /**
  * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
  * @skb: socket buffer to add attribute to
index caa5e18636dfd830e59295ab74d80ffd95bf3f22..0f7efa88f210360928b36a80022c9bd53d778b40 100644 (file)
@@ -392,9 +392,6 @@ struct tc_cls_u32_offload {
        };
 };
 
-/* tca flags definitions */
-#define TCA_CLS_FLAGS_SKIP_HW 1
-
 static inline bool tc_should_offload(struct net_device *dev, u32 flags)
 {
        if (!(dev->features & NETIF_F_HW_TC))
@@ -409,9 +406,27 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags)
        return true;
 }
 
+static inline bool tc_skip_sw(u32 flags)
+{
+       return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static inline bool tc_flags_valid(u32 flags)
+{
+       if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
+               return false;
+
+       if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
+               return false;
+
+       return true;
+}
+
 enum tc_fl_command {
        TC_CLSFLOWER_REPLACE,
        TC_CLSFLOWER_DESTROY,
+       TC_CLSFLOWER_STATS,
 };
 
 struct tc_cls_flower_offload {
index 46e55f0202a61b9be253422b4c32cee4f9f6d911..a1fd76c22a5903cadf2e97c844017925cfbc9f93 100644 (file)
@@ -527,11 +527,27 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
        return q->flags & TCQ_F_CPUSTATS;
 }
 
+static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+                                 __u64 bytes, __u32 packets)
+{
+       bstats->bytes += bytes;
+       bstats->packets += packets;
+}
+
 static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
 {
-       bstats->bytes += qdisc_pkt_len(skb);
-       bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+       _bstats_update(bstats,
+                      qdisc_pkt_len(skb),
+                      skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+}
+
+static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+                                     __u64 bytes, __u32 packets)
+{
+       u64_stats_update_begin(&bstats->syncp);
+       _bstats_update(&bstats->bstats, bytes, packets);
+       u64_stats_update_end(&bstats->syncp);
 }
 
 static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
index f80277569f24d44380ce0eed7dc47ce43ad863e3..e601c8c3bdc777b6458fe99d39de1f91b16f66d9 100644 (file)
 #ifndef _LINUX_IF_H
 #define _LINUX_IF_H
 
+#include <linux/libc-compat.h>          /* for compatibility with glibc */
 #include <linux/types.h>               /* for "__kernel_caddr_t" et al */
 #include <linux/socket.h>              /* for "struct sockaddr" et al  */
 #include <linux/compiler.h>            /* for "__user" et al           */
 
+#if __UAPI_DEF_IF_IFNAMSIZ
 #define        IFNAMSIZ        16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
 #define        IFALIASZ        256
 #include <linux/hdlc/ioctl.h>
 
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
 /**
  * enum net_device_flags - &struct net_device flags
  *
@@ -68,6 +74,8 @@
  * @IFF_ECHO: echo sent packets. Volatile.
  */
 enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
        IFF_UP                          = 1<<0,  /* sysfs */
        IFF_BROADCAST                   = 1<<1,  /* volatile */
        IFF_DEBUG                       = 1<<2,  /* sysfs */
@@ -84,11 +92,17 @@ enum net_device_flags {
        IFF_PORTSEL                     = 1<<13, /* sysfs */
        IFF_AUTOMEDIA                   = 1<<14, /* sysfs */
        IFF_DYNAMIC                     = 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
        IFF_LOWER_UP                    = 1<<16, /* volatile */
        IFF_DORMANT                     = 1<<17, /* volatile */
        IFF_ECHO                        = 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 };
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
 #define IFF_UP                         IFF_UP
 #define IFF_BROADCAST                  IFF_BROADCAST
 #define IFF_DEBUG                      IFF_DEBUG
@@ -105,9 +119,13 @@ enum net_device_flags {
 #define IFF_PORTSEL                    IFF_PORTSEL
 #define IFF_AUTOMEDIA                  IFF_AUTOMEDIA
 #define IFF_DYNAMIC                    IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define IFF_LOWER_UP                   IFF_LOWER_UP
 #define IFF_DORMANT                    IFF_DORMANT
 #define IFF_ECHO                       IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 
 #define IFF_VOLATILE   (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
                IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@ enum {
  *     being very small might be worth keeping for clean configuration.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
 struct ifmap {
        unsigned long mem_start;
        unsigned long mem_end;
@@ -175,6 +195,7 @@ struct ifmap {
        unsigned char port;
        /* 3 bytes spare */
 };
+#endif /* __UAPI_DEF_IF_IFMAP */
 
 struct if_settings {
        unsigned int type;      /* Type of physical device or protocol */
@@ -200,6 +221,8 @@ struct if_settings {
  * remainder may be interface specific.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
 struct ifreq {
 #define IFHWADDRLEN    6
        union
@@ -223,6 +246,7 @@ struct ifreq {
                struct  if_settings ifru_settings;
        } ifr_ifru;
 };
+#endif /* __UAPI_DEF_IF_IFREQ */
 
 #define ifr_name       ifr_ifrn.ifrn_name      /* interface name       */
 #define ifr_hwaddr     ifr_ifru.ifru_hwaddr    /* MAC address          */
@@ -249,6 +273,8 @@ struct ifreq {
  * must know all networks accessible).
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
 struct ifconf  {
        int     ifc_len;                        /* size of buffer       */
        union {
@@ -256,6 +282,8 @@ struct ifconf  {
                struct ifreq __user *ifcu_req;
        } ifc_ifcu;
 };
+#endif /* __UAPI_DEF_IF_IFCONF */
+
 #define        ifc_buf ifc_ifcu.ifcu_buf               /* buffer address       */
 #define        ifc_req ifc_ifcu.ifcu_req               /* array of structures  */
 
index 7d024ceb075d8d4cd657c1c25db37a748940fc89..d5e38c73377c05fa44607bffe8bfb01112330706 100644 (file)
 /* We have included glibc headers... */
 #if defined(__GLIBC__)
 
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
 /* Coordinate with glibc netinet/in.h header. */
 #if defined(_NETINET_IN_H)
 
  * that we need. */
 #else /* !defined(__GLIBC__) */
 
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
 /* Definitions for in.h */
 #define __UAPI_DEF_IN_ADDR             1
 #define __UAPI_DEF_IN_IPPROTO          1
index 84660905fedf92c0448c218ccf90ee768cb8d5e3..eba5914ba5d153fc3d047656cb7772c1559b5766 100644 (file)
@@ -151,6 +151,10 @@ enum {
 
 #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
 
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW  (1 << 0)
+#define TCA_CLS_FLAGS_SKIP_SW  (1 << 1)
+
 /* U32 filters */
 
 #define TC_U32_HTID(h) ((h)&0xFFF00000)
index 242cf0c6e33d37f229a224839ca6f679bb142674..e3969bd939e41d7fb1ff47f61344fad5f1cae482 100644 (file)
@@ -10,3 +10,4 @@ header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
 header-y += tc_connmark.h
+header-y += tc_ife.h
index d781b077431f90102af4f8203c2e89ff30d6693a..f1e8a0def99b0b530f5b5ddf6dc50528cb64509f 100644 (file)
@@ -129,14 +129,83 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 
        return fp;
 }
-EXPORT_SYMBOL_GPL(bpf_prog_realloc);
 
 void __bpf_prog_free(struct bpf_prog *fp)
 {
        kfree(fp->aux);
        vfree(fp);
 }
-EXPORT_SYMBOL_GPL(__bpf_prog_free);
+
+static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
+{
+       return BPF_CLASS(insn->code) == BPF_JMP  &&
+              /* Call and Exit are both special jumps with no
+               * target inside the BPF instruction image.
+               */
+              BPF_OP(insn->code) != BPF_CALL &&
+              BPF_OP(insn->code) != BPF_EXIT;
+}
+
+static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+{
+       struct bpf_insn *insn = prog->insnsi;
+       u32 i, insn_cnt = prog->len;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               if (!bpf_is_jmp_and_has_target(insn))
+                       continue;
+
+               /* Adjust offset of jmps if we cross boundaries. */
+               if (i < pos && i + insn->off + 1 > pos)
+                       insn->off += delta;
+               else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+                       insn->off -= delta;
+       }
+}
+
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+                                      const struct bpf_insn *patch, u32 len)
+{
+       u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+       struct bpf_prog *prog_adj;
+
+       /* Since our patchlet doesn't expand the image, we're done. */
+       if (insn_delta == 0) {
+               memcpy(prog->insnsi + off, patch, sizeof(*patch));
+               return prog;
+       }
+
+       insn_adj_cnt = prog->len + insn_delta;
+
+       /* Several new instructions need to be inserted. Make room
+        * for them. Likely, there's no need for a new allocation as
+        * last page could have large enough tailroom.
+        */
+       prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
+                                   GFP_USER);
+       if (!prog_adj)
+               return NULL;
+
+       prog_adj->len = insn_adj_cnt;
+
+       /* Patching happens in 3 steps:
+        *
+        * 1) Move over tail of insnsi from next instruction onwards,
+        *    so we can patch the single target insn with one or more
+        *    new ones (patching is always from 1 to n insns, n > 0).
+        * 2) Inject new instructions at the target location.
+        * 3) Adjust branch offsets if necessary.
+        */
+       insn_rest = insn_adj_cnt - off - len;
+
+       memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
+               sizeof(*patch) * insn_rest);
+       memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
+
+       bpf_adj_branches(prog_adj, off, insn_delta);
+
+       return prog_adj;
+}
 
 #ifdef CONFIG_BPF_JIT
 struct bpf_binary_header *
@@ -174,6 +243,209 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
        module_memfree(hdr);
 }
+
+int bpf_jit_harden __read_mostly;
+
+static int bpf_jit_blind_insn(const struct bpf_insn *from,
+                             const struct bpf_insn *aux,
+                             struct bpf_insn *to_buff)
+{
+       struct bpf_insn *to = to_buff;
+       u32 imm_rnd = prandom_u32();
+       s16 off;
+
+       BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
+       BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
+
+       if (from->imm == 0 &&
+           (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
+            from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
+               *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
+               goto out;
+       }
+
+       switch (from->code) {
+       case BPF_ALU | BPF_ADD | BPF_K:
+       case BPF_ALU | BPF_SUB | BPF_K:
+       case BPF_ALU | BPF_AND | BPF_K:
+       case BPF_ALU | BPF_OR  | BPF_K:
+       case BPF_ALU | BPF_XOR | BPF_K:
+       case BPF_ALU | BPF_MUL | BPF_K:
+       case BPF_ALU | BPF_MOV | BPF_K:
+       case BPF_ALU | BPF_DIV | BPF_K:
+       case BPF_ALU | BPF_MOD | BPF_K:
+               *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+               *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
+               break;
+
+       case BPF_ALU64 | BPF_ADD | BPF_K:
+       case BPF_ALU64 | BPF_SUB | BPF_K:
+       case BPF_ALU64 | BPF_AND | BPF_K:
+       case BPF_ALU64 | BPF_OR  | BPF_K:
+       case BPF_ALU64 | BPF_XOR | BPF_K:
+       case BPF_ALU64 | BPF_MUL | BPF_K:
+       case BPF_ALU64 | BPF_MOV | BPF_K:
+       case BPF_ALU64 | BPF_DIV | BPF_K:
+       case BPF_ALU64 | BPF_MOD | BPF_K:
+               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
+               break;
+
+       case BPF_JMP | BPF_JEQ  | BPF_K:
+       case BPF_JMP | BPF_JNE  | BPF_K:
+       case BPF_JMP | BPF_JGT  | BPF_K:
+       case BPF_JMP | BPF_JGE  | BPF_K:
+       case BPF_JMP | BPF_JSGT | BPF_K:
+       case BPF_JMP | BPF_JSGE | BPF_K:
+       case BPF_JMP | BPF_JSET | BPF_K:
+               /* Accommodate for extra offset in case of a backjump. */
+               off = from->off;
+               if (off < 0)
+                       off -= 2;
+               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
+               break;
+
+       case BPF_LD | BPF_ABS | BPF_W:
+       case BPF_LD | BPF_ABS | BPF_H:
+       case BPF_LD | BPF_ABS | BPF_B:
+               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
+               break;
+
+       case BPF_LD | BPF_IND | BPF_W:
+       case BPF_LD | BPF_IND | BPF_H:
+       case BPF_LD | BPF_IND | BPF_B:
+               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
+               *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
+               break;
+
+       case BPF_LD | BPF_IMM | BPF_DW:
+               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
+               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
+               *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
+               break;
+       case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
+               *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
+               *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
+               break;
+
+       case BPF_ST | BPF_MEM | BPF_DW:
+       case BPF_ST | BPF_MEM | BPF_W:
+       case BPF_ST | BPF_MEM | BPF_H:
+       case BPF_ST | BPF_MEM | BPF_B:
+               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+               *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
+               break;
+       }
+out:
+       return to - to_buff;
+}
+
+static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
+                                             gfp_t gfp_extra_flags)
+{
+       gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
+                         gfp_extra_flags;
+       struct bpf_prog *fp;
+
+       fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
+       if (fp != NULL) {
+               kmemcheck_annotate_bitfield(fp, meta);
+
+               /* aux->prog still points to the fp_other one, so
+                * when promoting the clone to the real program,
+                * this still needs to be adapted.
+                */
+               memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
+       }
+
+       return fp;
+}
+
+static void bpf_prog_clone_free(struct bpf_prog *fp)
+{
+       /* aux was stolen by the other clone, so we cannot free
+        * it from this path! It will be freed eventually by the
+        * other program on release.
+        *
+        * At this point, we don't need a deferred release since
+        * clone is guaranteed to not be locked.
+        */
+       fp->aux = NULL;
+       __bpf_prog_free(fp);
+}
+
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
+{
+       /* We have to repoint aux->prog to self, as we don't
+        * know whether fp here is the clone or the original.
+        */
+       fp->aux->prog = fp;
+       bpf_prog_clone_free(fp_other);
+}
+
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
+{
+       struct bpf_insn insn_buff[16], aux[2];
+       struct bpf_prog *clone, *tmp;
+       int insn_delta, insn_cnt;
+       struct bpf_insn *insn;
+       int i, rewritten;
+
+       if (!bpf_jit_blinding_enabled())
+               return prog;
+
+       clone = bpf_prog_clone_create(prog, GFP_USER);
+       if (!clone)
+               return ERR_PTR(-ENOMEM);
+
+       insn_cnt = clone->len;
+       insn = clone->insnsi;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               /* We temporarily need to hold the original ld64 insn
+                * so that we can still access the first part in the
+                * second blinding run.
+                */
+               if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+                   insn[1].code == 0)
+                       memcpy(aux, insn, sizeof(aux));
+
+               rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
+               if (!rewritten)
+                       continue;
+
+               tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
+               if (!tmp) {
+                       /* Patching may have repointed aux->prog during
+                        * realloc from the original one, so we need to
+                        * fix it up here on error.
+                        */
+                       bpf_jit_prog_release_other(prog, clone);
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               clone = tmp;
+               insn_delta = rewritten - 1;
+
+               /* Walk new program and skip insns we just inserted. */
+               insn = clone->insnsi + i + insn_delta;
+               insn_cnt += insn_delta;
+               i        += insn_delta;
+       }
+
+       return clone;
+}
 #endif /* CONFIG_BPF_JIT */
 
 /* Base function for offset calculation. Needs to go into .text section,
@@ -692,15 +964,22 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 /**
  *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
+ *     @err: pointer to error variable
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-int bpf_prog_select_runtime(struct bpf_prog *fp)
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
        fp->bpf_func = (void *) __bpf_prog_run;
 
-       bpf_int_jit_compile(fp);
+       /* eBPF JITs can rewrite the program in case constant
+        * blinding is active. However, in case of error during
+        * blinding, bpf_int_jit_compile() must always return a
+        * valid program, which in this case would simply not
+        * be JITed, but falls back to the interpreter.
+        */
+       fp = bpf_int_jit_compile(fp);
        bpf_prog_lock_ro(fp);
 
        /* The tail call compatibility check can only be done at
@@ -708,7 +987,9 @@ int bpf_prog_select_runtime(struct bpf_prog *fp)
         * with JITed or non JITed program concatenations and not
         * all eBPF JITs might immediately support all features.
         */
-       return bpf_check_tail_call(fp);
+       *err = bpf_check_tail_call(fp);
+
+       return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
@@ -790,8 +1071,9 @@ const struct bpf_func_proto bpf_tail_call_proto = {
 };
 
 /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
+       return prog;
 }
 
 bool __weak bpf_helper_changes_skb_data(void *func)
index cf5e9f7ad13ad13ebb5b6bc5a775beca834c7c86..46ecce4b79ede6880df64855818fbc29cf808059 100644 (file)
@@ -762,7 +762,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        fixup_bpf_calls(prog);
 
        /* eBPF program is ready to be JITed */
-       err = bpf_prog_select_runtime(prog);
+       prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
index 84bff68cf80e0c20baf44df53b203c4e3ddd6b8c..a08d66215245712b5c08808ae6b00058906c99f7 100644 (file)
@@ -2587,26 +2587,6 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
                        insn->src_reg = 0;
 }
 
-static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
-{
-       struct bpf_insn *insn = prog->insnsi;
-       int insn_cnt = prog->len;
-       int i;
-
-       for (i = 0; i < insn_cnt; i++, insn++) {
-               if (BPF_CLASS(insn->code) != BPF_JMP ||
-                   BPF_OP(insn->code) == BPF_CALL ||
-                   BPF_OP(insn->code) == BPF_EXIT)
-                       continue;
-
-               /* adjust offset of jmps if necessary */
-               if (i < pos && i + insn->off + 1 > pos)
-                       insn->off += delta;
-               else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-                       insn->off -= delta;
-       }
-}
-
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -2616,14 +2596,15 @@ static int convert_ctx_accesses(struct verifier_env *env)
        int insn_cnt = env->prog->len;
        struct bpf_insn insn_buf[16];
        struct bpf_prog *new_prog;
-       u32 cnt;
-       int i;
        enum bpf_access_type type;
+       int i;
 
        if (!env->prog->aux->ops->convert_ctx_access)
                return 0;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
+               u32 insn_delta, cnt;
+
                if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
                        type = BPF_READ;
                else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
@@ -2645,34 +2626,18 @@ static int convert_ctx_accesses(struct verifier_env *env)
                        return -EINVAL;
                }
 
-               if (cnt == 1) {
-                       memcpy(insn, insn_buf, sizeof(*insn));
-                       continue;
-               }
-
-               /* several new insns need to be inserted. Make room for them */
-               insn_cnt += cnt - 1;
-               new_prog = bpf_prog_realloc(env->prog,
-                                           bpf_prog_size(insn_cnt),
-                                           GFP_USER);
+               new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
                if (!new_prog)
                        return -ENOMEM;
 
-               new_prog->len = insn_cnt;
-
-               memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
-                       sizeof(*insn) * (insn_cnt - i - cnt));
-
-               /* copy substitute insns in place of load instruction */
-               memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
-
-               /* adjust branches in the whole program */
-               adjust_branches(new_prog, i, cnt - 1);
+               insn_delta = cnt - 1;
 
                /* keep walking new program and skip insns we just inserted */
                env->prog = new_prog;
-               insn = new_prog->insnsi + i + cnt - 1;
-               i += cnt - 1;
+               insn      = new_prog->insnsi + i + insn_delta;
+
+               insn_cnt += insn_delta;
+               i        += insn_delta;
        }
 
        return 0;
index 909a7d31ffd3d3083c2253aefd4253715bcb2278..86cb5c6e89320f28e17691c6d69e58c9dfde81fb 100644 (file)
@@ -1215,6 +1215,41 @@ static void cgroup_destroy_root(struct cgroup_root *root)
        cgroup_free_root(root);
 }
 
+/*
+ * look up cgroup associated with current task's cgroup namespace on the
+ * specified hierarchy
+ */
+static struct cgroup *
+current_cgns_cgroup_from_root(struct cgroup_root *root)
+{
+       struct cgroup *res = NULL;
+       struct css_set *cset;
+
+       lockdep_assert_held(&css_set_lock);
+
+       rcu_read_lock();
+
+       cset = current->nsproxy->cgroup_ns->root_cset;
+       if (cset == &init_css_set) {
+               res = &root->cgrp;
+       } else {
+               struct cgrp_cset_link *link;
+
+               list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+                       struct cgroup *c = link->cgrp;
+
+                       if (c->root == root) {
+                               res = c;
+                               break;
+                       }
+               }
+       }
+       rcu_read_unlock();
+
+       BUG_ON(!res);
+       return res;
+}
+
 /* look up cgroup associated with given css_set on the specified hierarchy */
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
                                            struct cgroup_root *root)
@@ -1593,6 +1628,33 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
        return 0;
 }
 
+static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+                           struct kernfs_root *kf_root)
+{
+       int len = 0;
+       char *buf = NULL;
+       struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
+       struct cgroup *ns_cgroup;
+
+       buf = kmalloc(PATH_MAX, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       spin_lock_bh(&css_set_lock);
+       ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
+       len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+       spin_unlock_bh(&css_set_lock);
+
+       if (len >= PATH_MAX)
+               len = -ERANGE;
+       else if (len > 0) {
+               seq_escape(sf, buf, " \t\n\\");
+               len = 0;
+       }
+       kfree(buf);
+       return len;
+}
+
 static int cgroup_show_options(struct seq_file *seq,
                               struct kernfs_root *kf_root)
 {
@@ -5433,6 +5495,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
        .mkdir                  = cgroup_mkdir,
        .rmdir                  = cgroup_rmdir,
        .rename                 = cgroup_rename,
+       .show_path              = cgroup_show_path,
 };
 
 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
index 0bdc6e7d4908f23ae79f84a13f0bd112e13f9518..5b167256451889db0240d5692a1f6d3a5987d879 100644 (file)
@@ -351,7 +351,7 @@ static struct srcu_struct pmus_srcu;
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 2;
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
index c61f0cbd308b5b4456e69c1539f9b334a34c0632..7611d0f66cf85adfab20f6374517a0efc645ebe4 100644 (file)
@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                         bool truncated)
 {
        struct ring_buffer *rb = handle->rb;
+       bool wakeup = truncated;
        unsigned long aux_head;
        u64 flags = 0;
 
@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
 
        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
-               perf_output_wakeup(handle);
+               wakeup = true;
                local_add(rb->aux_watermark, &rb->aux_wakeup);
        }
+
+       if (wakeup) {
+               if (truncated)
+                       handle->event->pending_disable = 1;
+               perf_output_wakeup(handle);
+       }
+
        handle->event = NULL;
 
        local_set(&rb->aux_nest, 0);
index affd97ec9f65a0c1b9751ee9f1d39cf77757b0aa..686ec8adf952fbd3767c652515089d92b3827ae0 100644 (file)
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                                     !cpumask_test_cpu(later_rq->cpu,
                                                       &task->cpus_allowed) ||
                                     task_running(rq, task) ||
+                                    !dl_task(task) ||
                                     !task_on_rq_queued(task))) {
                                double_unlock_balance(rq, later_rq);
                                later_rq = NULL;
index 0fe30e66aff1db44d58ec96cbee332a78257e4d3..e7dd0ec169bea82c630e2b8d897d2aee0cc9571b 100644 (file)
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       struct rq *rq = rq_of(cfs_rq);
+
+       cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
@@ -3181,25 +3188,17 @@ static inline void check_schedstat_required(void)
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-       bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
-       bool curr = cfs_rq->curr == se;
-
        /*
-        * If we're the current task, we must renormalise before calling
-        * update_curr().
+        * Update the normalized vruntime before updating min_vruntime
+        * through calling update_curr().
         */
-       if (renorm && curr)
+       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                se->vruntime += cfs_rq->min_vruntime;
 
-       update_curr(cfs_rq);
-
        /*
-        * Otherwise, renormalise after, such that we're placed at the current
-        * moment in time, instead of some random moment in the past.
+        * Update run-time statistics of the 'current'.
         */
-       if (renorm && !curr)
-               se->vruntime += cfs_rq->min_vruntime;
-
+       update_curr(cfs_rq);
        enqueue_entity_load_avg(cfs_rq, se);
        account_entity_enqueue(cfs_rq, se);
        update_cfs_shares(cfs_rq);
@@ -3215,7 +3214,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                update_stats_enqueue(cfs_rq, se);
                check_spread(cfs_rq, se);
        }
-       if (!curr)
+       if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;
 
index c41ea7ac1764b831fd015531c3d8f40d445bf8ae..ec4f538d4396beb20e2656923995438d5093f667 100644 (file)
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
+                                    !rt_task(task) ||
                                     !task_on_rq_queued(task))) {
 
                                double_unlock_balance(rq, lowest_rq);
index 3bfdff06eea728b38364652808e9f548f0e6fe37..5f5068e94003d80836040d75931269aca247e853 100644 (file)
@@ -4554,6 +4554,17 @@ static void rebind_workers(struct worker_pool *pool)
                                                  pool->attrs->cpumask) < 0);
 
        spin_lock_irq(&pool->lock);
+
+       /*
+        * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+        * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
+        * being reworked and this can go away in time.
+        */
+       if (!(pool->flags & POOL_DISASSOCIATED)) {
+               spin_unlock_irq(&pool->lock);
+               return;
+       }
+
        pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
index 2b3f46c049d458a590d080823b344da3b3229f7c..554522934c442ae15c01b1b1266e8e3e9515bfbd 100644 (file)
@@ -74,7 +74,7 @@ next_tag:
 
        /* Extract a tag from the data */
        tag = data[dp++];
-       if (tag == 0) {
+       if (tag == ASN1_EOC) {
                /* It appears to be an EOC. */
                if (data[dp++] != 0)
                        goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:
 
        /* Extract the length */
        len = data[dp++];
-       if (len <= 0x7f) {
-               dp += len;
-               goto next_tag;
-       }
+       if (len <= 0x7f)
+               goto check_length;
 
        if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
                /* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
        }
 
        n = len - 0x80;
-       if (unlikely(n > sizeof(size_t) - 1))
+       if (unlikely(n > sizeof(len) - 1))
                goto length_too_long;
        if (unlikely(n > datalen - dp))
                goto data_overrun_error;
-       for (len = 0; n > 0; n--) {
+       len = 0;
+       for (; n > 0; n--) {
                len <<= 8;
                len |= data[dp++];
        }
+check_length:
+       if (len > datalen - dp)
+               goto data_overrun_error;
        dp += len;
        goto next_tag;
 
index 5fecddc32b1b47e1c2d7ce9706b9a840995f3d4e..ca5316e0087b55de6830f6a2abe24e41291015b3 100644 (file)
@@ -569,6 +569,25 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_alignment);
 
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+       unsigned long res = 0;
+       size_t size = i->count;
+       if (!size)
+               return 0;
+
+       iterate_all_kinds(i, size, v,
+               (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+                       (size != v.iov_len ? size : 0), 0),
+               (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+                       (size != v.bv_len ? size : 0)),
+               (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+                       (size != v.iov_len ? size : 0))
+               );
+       return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+
 ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
index 8f22fbedc3a699d07f05375d0a0832cc9da1d1b0..93f45011a59d01ef017aa2a1a4a68ee6f4060bce 100644 (file)
@@ -5621,7 +5621,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
                fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
                memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
-               bpf_prog_select_runtime(fp);
+               /* We cannot error here as we don't need type compatibility
+                * checks.
+                */
+               fp = bpf_prog_select_runtime(fp, err);
                break;
        }
 
index f7daa7de8f4867dc871edb8ea3a1ec6189e4d54f..b49ee126d4d1feb7a89a9c776edc89141c3351da 100644 (file)
@@ -1298,15 +1298,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
        /*
         * We can only reuse the page if nobody else maps the huge page or it's
-        * part. We can do it by checking page_mapcount() on each sub-page, but
-        * it's expensive.
-        * The cheaper way is to check page_count() to be equal 1: every
-        * mapcount takes page reference reference, so this way we can
-        * guarantee, that the PMD is the only mapping.
-        * This can give false negative if somebody pinned the page, but that's
-        * fine.
+        * part.
         */
-       if (page_mapcount(page) == 1 && page_count(page) == 1) {
+       if (page_trans_huge_mapcount(page, NULL) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
@@ -2079,7 +2073,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                if (pte_write(pteval)) {
                        writable = true;
                } else {
-                       if (PageSwapCache(page) && !reuse_swap_page(page)) {
+                       if (PageSwapCache(page) &&
+                           !reuse_swap_page(page, NULL)) {
                                unlock_page(page);
                                result = SCAN_SWAP_CACHE_PAGE;
                                goto out;
@@ -3222,6 +3217,64 @@ int total_mapcount(struct page *page)
        return ret;
 }
 
+/*
+ * This calculates accurately how many mappings a transparent hugepage
+ * has (unlike page_mapcount() which isn't fully accurate). This full
+ * accuracy is primarily needed to know if copy-on-write faults can
+ * reuse the page and change the mapping to read-write instead of
+ * copying them. At the same time this returns the total_mapcount too.
+ *
+ * The function returns the highest mapcount any one of the subpages
+ * has. If the return value is one, even if different processes are
+ * mapping different subpages of the transparent hugepage, they can
+ * all reuse it, because each process is reusing a different subpage.
+ *
+ * The total_mapcount is instead counting all virtual mappings of the
+ * subpages. If the total_mapcount is equal to "one", it tells the
+ * caller all mappings belong to the same "mm" and in turn the
+ * anon_vma of the transparent hugepage can become the vma->anon_vma
+ * local one as no other process may be mapping any of the subpages.
+ *
+ * It would be more accurate to replace page_mapcount() with
+ * page_trans_huge_mapcount(), however we only use
+ * page_trans_huge_mapcount() in the copy-on-write faults where we
+ * need full accuracy to avoid breaking page pinning, because
+ * page_trans_huge_mapcount() is slower than page_mapcount().
+ */
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
+{
+       int i, ret, _total_mapcount, mapcount;
+
+       /* hugetlbfs shouldn't call it */
+       VM_BUG_ON_PAGE(PageHuge(page), page);
+
+       if (likely(!PageTransCompound(page))) {
+               mapcount = atomic_read(&page->_mapcount) + 1;
+               if (total_mapcount)
+                       *total_mapcount = mapcount;
+               return mapcount;
+       }
+
+       page = compound_head(page);
+
+       _total_mapcount = ret = 0;
+       for (i = 0; i < HPAGE_PMD_NR; i++) {
+               mapcount = atomic_read(&page[i]._mapcount) + 1;
+               ret = max(ret, mapcount);
+               _total_mapcount += mapcount;
+       }
+       if (PageDoubleMap(page)) {
+               ret -= 1;
+               _total_mapcount -= HPAGE_PMD_NR;
+       }
+       mapcount = compound_mapcount(page);
+       ret += mapcount;
+       _total_mapcount += mapcount;
+       if (total_mapcount)
+               *total_mapcount = _total_mapcount;
+       return ret;
+}
+
 /*
  * This function splits huge page into normal pages. @page can point to any
  * subpage of huge page to split. Split doesn't change the position of @page.
index b99e828172f6ef30e279e4d07b7a74ba9cbb36db..4786b4150f62108438f5aabaf77d339280eb51dc 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -783,6 +783,7 @@ static int unmerge_and_remove_all_rmap_items(void)
                }
 
                remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
+               up_read(&mm->mmap_sem);
 
                spin_lock(&ksm_mmlist_lock);
                ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -794,12 +795,9 @@ static int unmerge_and_remove_all_rmap_items(void)
 
                        free_mm_slot(mm_slot);
                        clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-                       up_read(&mm->mmap_sem);
                        mmdrop(mm);
-               } else {
+               } else
                        spin_unlock(&ksm_mmlist_lock);
-                       up_read(&mm->mmap_sem);
-               }
        }
 
        /* Clean up stable nodes, but don't worry if some are still busy */
@@ -1663,8 +1661,15 @@ next_mm:
                up_read(&mm->mmap_sem);
                mmdrop(mm);
        } else {
-               spin_unlock(&ksm_mmlist_lock);
                up_read(&mm->mmap_sem);
+               /*
+                * up_read(&mm->mmap_sem) first because after
+                * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
+                * already have been freed under us by __ksm_exit()
+                * because the "mm_slot" is still hashed and
+                * ksm_scan.mm_slot doesn't point to it anymore.
+                */
+               spin_unlock(&ksm_mmlist_lock);
        }
 
        /* Repeat until we've completed scanning the whole list */
index 52c218e2b724cdb8db1678c4070f31ab033daf65..07493e34ab7e281936d43cb84e681728a23cbc4a 100644 (file)
@@ -2373,6 +2373,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * not dirty accountable.
         */
        if (PageAnon(old_page) && !PageKsm(old_page)) {
+               int total_mapcount;
                if (!trylock_page(old_page)) {
                        get_page(old_page);
                        pte_unmap_unlock(page_table, ptl);
@@ -2387,13 +2388,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        put_page(old_page);
                }
-               if (reuse_swap_page(old_page)) {
-                       /*
-                        * The page is all ours.  Move it to our anon_vma so
-                        * the rmap code will not search our parent or siblings.
-                        * Protected against the rmap code by the page lock.
-                        */
-                       page_move_anon_rmap(old_page, vma, address);
+               if (reuse_swap_page(old_page, &total_mapcount)) {
+                       if (total_mapcount == 1) {
+                               /*
+                                * The page is all ours. Move it to
+                                * our anon_vma so the rmap code will
+                                * not search our parent or siblings.
+                                * Protected against the rmap code by
+                                * the page lock.
+                                */
+                               page_move_anon_rmap(compound_head(old_page),
+                                                   vma, address);
+                       }
                        unlock_page(old_page);
                        return wp_page_reuse(mm, vma, address, page_table, ptl,
                                             orig_pte, old_page, 0, 0);
@@ -2617,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        inc_mm_counter_fast(mm, MM_ANONPAGES);
        dec_mm_counter_fast(mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
-       if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
+       if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                flags &= ~FAULT_FLAG_WRITE;
                ret |= VM_FAULT_WRITE;
index 83874eced5bfa0ac4c889cb0d65ecf22cfa24af0..031713ab40ce9fdeb289e149605e6d1514f345c0 100644 (file)
@@ -922,18 +922,19 @@ out:
  * to it.  And as a side-effect, free up its swap: because the old content
  * on disk will never be read, and seeking back there to write new content
  * later would only waste time away from clustering.
+ *
+ * NOTE: total_mapcount should not be relied upon by the caller if
+ * reuse_swap_page() returns false, but it may be always overwritten
+ * (see the other implementation for CONFIG_SWAP=n).
  */
-int reuse_swap_page(struct page *page)
+bool reuse_swap_page(struct page *page, int *total_mapcount)
 {
        int count;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (unlikely(PageKsm(page)))
-               return 0;
-       /* The page is part of THP and cannot be reused */
-       if (PageTransCompound(page))
-               return 0;
-       count = page_mapcount(page);
+               return false;
+       count = page_trans_huge_mapcount(page, total_mapcount);
        if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
                if (count == 1 && !PageWriteback(page)) {
index e72efb109fde5e5dc23007fa302a21cba7d103c5..fe47fbba995abd4e9911f58f9b9741ed7cc8b08a 100644 (file)
@@ -1735,10 +1735,13 @@ static struct page *isolate_source_page(struct size_class *class)
 static unsigned long zs_can_compact(struct size_class *class)
 {
        unsigned long obj_wasted;
+       unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+       unsigned long obj_used = zs_stat_get(class, OBJ_USED);
 
-       obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
-               zs_stat_get(class, OBJ_USED);
+       if (obj_allocated <= obj_used)
+               return 0;
 
+       obj_wasted = obj_allocated - obj_used;
        obj_wasted /= get_maxobj_per_zspage(class->size,
                        class->pages_per_zspage);
 
index b841c42e5c9b7d38b0b62c9d6a31d0a61cc27409..ff40562a782ccfc323eb12e05e01596dcd6befb3 100644 (file)
@@ -289,14 +289,17 @@ config BQL
 
 config BPF_JIT
        bool "enable BPF Just In Time compiler"
-       depends on HAVE_BPF_JIT
+       depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
        depends on MODULES
        ---help---
          Berkeley Packet Filter filtering capabilities are normally handled
          by an interpreter. This option allows kernel to generate a native
          code when filter is loaded in memory. This should speedup
-         packet sniffing (libpcap/tcpdump). Note : Admin should enable
-         this feature changing /proc/sys/net/core/bpf_jit_enable
+         packet sniffing (libpcap/tcpdump).
+
+         Note, admin should enable this feature changing:
+         /proc/sys/net/core/bpf_jit_enable
+         /proc/sys/net/core/bpf_jit_harden (optional)
 
 config NET_FLOW_LIMIT
        bool
@@ -419,6 +422,14 @@ config MAY_USE_DEVLINK
 
 endif   # if NET
 
-# Used by archs to tell that they support BPF_JIT
-config HAVE_BPF_JIT
+# Used by archs to tell that they support BPF JIT compiler plus which flavour.
+# Only one of the two can be selected for a specific arch since eBPF JIT supersedes
+# the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+       bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
        bool
index 71c2a1f473adb82a77974cc377f88b5c26f5d42a..68adb5f52110d85fead496a8a76a7248ae8cefae 100644 (file)
@@ -994,7 +994,11 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
                 */
                goto out_err_free;
 
-       bpf_prog_select_runtime(fp);
+       /* We are guaranteed to never error here with cBPF to eBPF
+        * transitions, since there's no issue with type compatibility
+        * checks on program arrays.
+        */
+       fp = bpf_prog_select_runtime(fp, &err);
 
        kfree(old_prog);
        return fp;
@@ -2069,16 +2073,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
-       /* check bounds */
        if (off < 0 || off >= sizeof(struct __sk_buff))
                return false;
-
-       /* disallow misaligned access */
+       /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;
-
-       /* all __sk_buff fields are __u32 */
-       if (size != 4)
+       if (size != sizeof(__u32))
                return false;
 
        return true;
@@ -2097,7 +2097,7 @@ static bool sk_filter_is_valid_access(int off, int size,
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, cb[0]) ...
-                       offsetof(struct __sk_buff, cb[4]):
+                    offsetof(struct __sk_buff, cb[4]):
                        break;
                default:
                        return false;
@@ -2278,30 +2278,30 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 }
 
 static const struct bpf_verifier_ops sk_filter_ops = {
-       .get_func_proto = sk_filter_func_proto,
-       .is_valid_access = sk_filter_is_valid_access,
-       .convert_ctx_access = bpf_net_convert_ctx_access,
+       .get_func_proto         = sk_filter_func_proto,
+       .is_valid_access        = sk_filter_is_valid_access,
+       .convert_ctx_access     = bpf_net_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
-       .get_func_proto = tc_cls_act_func_proto,
-       .is_valid_access = tc_cls_act_is_valid_access,
-       .convert_ctx_access = bpf_net_convert_ctx_access,
+       .get_func_proto         = tc_cls_act_func_proto,
+       .is_valid_access        = tc_cls_act_is_valid_access,
+       .convert_ctx_access     = bpf_net_convert_ctx_access,
 };
 
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
-       .ops = &sk_filter_ops,
-       .type = BPF_PROG_TYPE_SOCKET_FILTER,
+       .ops    = &sk_filter_ops,
+       .type   = BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
 static struct bpf_prog_type_list sched_cls_type __read_mostly = {
-       .ops = &tc_cls_act_ops,
-       .type = BPF_PROG_TYPE_SCHED_CLS,
+       .ops    = &tc_cls_act_ops,
+       .type   = BPF_PROG_TYPE_SCHED_CLS,
 };
 
 static struct bpf_prog_type_list sched_act_type __read_mostly = {
-       .ops = &tc_cls_act_ops,
-       .type = BPF_PROG_TYPE_SCHED_ACT,
+       .ops    = &tc_cls_act_ops,
+       .type   = BPF_PROG_TYPE_SCHED_ACT,
 };
 
 static int __init register_sk_filter_ops(void)
index a6beb7b6ae556dff501413d6661d9c4655502e36..0df2aa6525308a365d89f57f6da76d57a24238f0 100644 (file)
@@ -294,6 +294,15 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+# ifdef CONFIG_HAVE_EBPF_JIT
+       {
+               .procname       = "bpf_jit_harden",
+               .data           = &bpf_jit_harden,
+               .maxlen         = sizeof(int),
+               .mode           = 0600,
+               .proc_handler   = proc_dointvec,
+       },
+# endif
 #endif
        {
                .procname       = "netdev_tstamp_prequeue",
index ab64d9f2eef91dc22829a2eb442a6c887c9a8838..d09173bf95005be87d4d0cbcc754147b3c8e62c9 100644 (file)
@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
                        val = 65535 - 40;
                if (type == RTAX_MTU && val > 65535 - 15)
                        val = 65535 - 15;
+               if (type == RTAX_HOPLIMIT && val > 255)
+                       val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        return -EINVAL;
                fi->fib_metrics[type - 1] = val;
index aaeb478b54cd2657ae8c3d838141a61b9d5eb413..4d2025f7ec578b7e3f3fd342e82e734d36395068 100644 (file)
@@ -276,7 +276,10 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                                           raw_proto, false) < 0)
                        goto drop;
 
-               skb_pop_mac_header(skb);
+               if (tunnel->dev->type != ARPHRD_NONE)
+                       skb_pop_mac_header(skb);
+               else
+                       skb_reset_mac_header(skb);
                if (tunnel->collect_md) {
                        __be16 flags;
                        __be64 tun_id;
@@ -884,6 +887,8 @@ static void ipgre_netlink_parms(struct net_device *dev,
                struct ip_tunnel *t = netdev_priv(dev);
 
                t->collect_md = true;
+               if (dev->type == ARPHRD_IPGRE)
+                       dev->type = ARPHRD_NONE;
        }
 }
 
index 5805762d7fc79a702f1d67e802992b822f66f000..71a52f4d4cffba2db9353f43dc817689bf4fab10 100644 (file)
@@ -247,8 +247,9 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
                }
 #endif
                if (cmsg->cmsg_level == SOL_SOCKET) {
-                       if (__sock_cmsg_send(sk, msg, cmsg, &ipc->sockc))
-                               return -EINVAL;
+                       err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+                       if (err)
+                               return err;
                        continue;
                }
 
index 8219d0d8dc8370d0d3e6fc4cd17b4925617968ab..3708de2a66833cf1d4a221a2b6ce3923bde978c4 100644 (file)
@@ -1556,9 +1556,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = tcp_hdr(skb);
+       th = (const struct tcphdr *)skb->data;
 
-       if (th->doff < sizeof(struct tcphdr) / 4)
+       if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff * 4))
                goto discard_it;
@@ -1571,7 +1571,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
        if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
                goto csum_error;
 
-       th = tcp_hdr(skb);
+       th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
        /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
         * barrier() makes sure compiler wont play fool^Waliasing games.
index 8daefd8b1b495f23e45b6c9e6b70525715498c08..8bd9911fdd163ab739d2dfda34e053b5e8e0bc03 100644 (file)
@@ -364,7 +364,7 @@ tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
  * be sent.
  */
 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
-                               int tcp_header_len)
+                        struct tcphdr *th, int tcp_header_len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -375,7 +375,7 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-                               tcp_hdr(skb)->cwr = 1;
+                               th->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else if (!tcp_ca_needs_ecn(sk)) {
@@ -383,7 +383,7 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
-                       tcp_hdr(skb)->ece = 1;
+                       th->ece = 1;
        }
 }
 
@@ -954,7 +954,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
        /* Build TCP header and checksum it. */
-       th = tcp_hdr(skb);
+       th = (struct tcphdr *)skb->data;
        th->source              = inet->inet_sport;
        th->dest                = inet->inet_dport;
        th->seq                 = htonl(tcb->seq);
@@ -962,14 +962,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->tcp_flags);
 
-       if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
-               /* RFC1323: The window in SYN & SYN/ACK segments
-                * is never scaled.
-                */
-               th->window      = htons(min(tp->rcv_wnd, 65535U));
-       } else {
-               th->window      = htons(tcp_select_window(sk));
-       }
        th->check               = 0;
        th->urg_ptr             = 0;
 
@@ -986,9 +978,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        tcp_options_write((__be32 *)(th + 1), tp, &opts);
        skb_shinfo(skb)->gso_type = sk->sk_gso_type;
-       if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
-               tcp_ecn_send(sk, skb, tcp_header_size);
-
+       if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
+               th->window      = htons(tcp_select_window(sk));
+               tcp_ecn_send(sk, skb, th, tcp_header_size);
+       } else {
+               /* RFC1323: The window in SYN & SYN/ACK segments
+                * is never scaled.
+                */
+               th->window      = htons(min(tp->rcv_wnd, 65535U));
+       }
 #ifdef CONFIG_TCP_MD5SIG
        /* Calculate the MD5 hash, as we have all we need now */
        if (md5) {
@@ -2658,8 +2656,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
         */
        if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
                     skb_headroom(skb) >= 0xFFFF)) {
-               struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
-                                                  GFP_ATOMIC);
+               struct sk_buff *nskb;
+
+               skb_mstamp_get(&skb->skb_mstamp);
+               nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
                err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
                             -ENOBUFS;
        } else {
@@ -3038,7 +3038,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
 
-       th = tcp_hdr(skb);
+       th = (struct tcphdr *)skb->data;
        memset(th, 0, sizeof(struct tcphdr));
        th->syn = 1;
        th->ack = 1;
index 00d0c2903173a96571983216f2839a93059cad22..37874e2f30edf98f31e2a5097761143d507d5b95 100644 (file)
@@ -746,8 +746,9 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                }
 
                if (cmsg->cmsg_level == SOL_SOCKET) {
-                       if (__sock_cmsg_send(sk, msg, cmsg, sockc))
-                               return -EINVAL;
+                       err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+                       if (err)
+                               return err;
                        continue;
                }
 
index c42fa1deb152c6c3292d705b3d6157c32bd3f837..969913da494fdf1d80ce674c3b6c421fdab18d3d 100644 (file)
@@ -1750,6 +1750,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
                } else {
                        val = nla_get_u32(nla);
                }
+               if (type == RTAX_HOPLIMIT && val > 255)
+                       val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        goto err;
 
index c4efaa97280c20918866e3311bb756abf0a325c4..79e33e02f11accfd5a6c8cf2751df0a8793ab016 100644 (file)
@@ -1369,9 +1369,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = tcp_hdr(skb);
+       th = (const struct tcphdr *)skb->data;
 
-       if (th->doff < sizeof(struct tcphdr)/4)
+       if (unlikely(th->doff < sizeof(struct tcphdr)/4))
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;
@@ -1379,7 +1379,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
                goto csum_error;
 
-       th = tcp_hdr(skb);
+       th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
 
 lookup:
index 566c64e3ec50be4a1340a45bc2b9ea756197912b..db2312eeb2a47c44db0f0ac5a529a10a0a8f8d2f 100644 (file)
@@ -75,6 +75,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
 static __read_mostly seqcount_t nf_conntrack_generation;
+static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
index d016066a25e37a44d5d6bb58204058e87f6b974f..1b4de4bd695865c9bb9b2ca70b8b309d902ccc10 100644 (file)
@@ -96,6 +96,8 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
                        return -EINVAL;
                if (flags & NFACCT_F_OVERQUOTA)
                        return -EINVAL;
+               if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
+                       return -EINVAL;
 
                size += sizeof(u64);
        }
index 29d2c31f406ca585d5f0eb1f08bcaf26d8364053..daf45da448fab4406cf4b5727404c88c1f0759be 100644 (file)
@@ -236,6 +236,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
 
                list_del(&info->timer->entry);
                del_timer_sync(&info->timer->timer);
+               cancel_work_sync(&info->timer->work);
                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
                kfree(info->timer->attr.attr.name);
                kfree(info->timer);
index 9f0bc49fa96956c6360f5972411f5fdb3961b788..3d5feede962dc584408e9b5a79e4b723f5308319 100644 (file)
@@ -768,6 +768,19 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                        return -EINVAL;
                }
 
+               /* Userspace may decide to perform a ct lookup without a helper
+                * specified followed by a (recirculate and) commit with one.
+                * Therefore, for unconfirmed connections which we will commit,
+                * we need to attach the helper here.
+                */
+               if (!nf_ct_is_confirmed(ct) && info->commit &&
+                   info->helper && !nfct_help(ct)) {
+                       int err = __nf_ct_try_assign_helper(ct, info->ct,
+                                                           GFP_ATOMIC);
+                       if (err)
+                               return err;
+               }
+
                /* Call the helper only if:
                 * - nf_conntrack_in() was executed above ("!cached") for a
                 *   confirmed connection, or
index 673fd1f86ebeee8094d1f5314f45c7ebf27a3143..b83c6807a5ae5cedc63b073c7f928b1776f42524 100644 (file)
@@ -16,7 +16,7 @@ if QRTR
 
 config QRTR_SMD
        tristate "SMD IPC Router channels"
-       depends on QCOM_SMD || COMPILE_TEST
+       depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
        ---help---
          Say Y here to support SMD based ipcrouter channels.  SMD is the
          most common transport for IPC Router.
index 6c00dc623b7eb61a64ef2d245b381550b5fcc427..ab09e40f7c74b9183397a78afc701cd24a4cab3c 100644 (file)
@@ -1,2 +1,4 @@
 obj-$(CONFIG_QRTR) := qrtr.o
-obj-$(CONFIG_QRTR_SMD) += smd.o
+
+obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o
+qrtr-smd-y     := smd.o
index 1a6e09fbb2a590f99f08cf4a5ebecba5ec4f996f..ec5cc8435238695d5fda1282ee8ae7e1890dd2a7 100644 (file)
@@ -148,6 +148,20 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
        return action;
 }
 
+static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
+                                 u64 lastuse)
+{
+       struct tcf_gact *gact = a->priv;
+       int action = READ_ONCE(gact->tcf_action);
+       struct tcf_t *tm = &gact->tcf_tm;
+
+       _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes, packets);
+       if (action == TC_ACT_SHOT)
+               this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
+
+       tm->lastuse = lastuse;
+}
+
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
@@ -207,6 +221,7 @@ static struct tc_action_ops act_gact_ops = {
        .type           =       TCA_ACT_GACT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_gact,
+       .stats_update   =       tcf_gact_stats_update,
        .dump           =       tcf_gact_dump,
        .init           =       tcf_gact_init,
        .walk           =       tcf_gact_walker,
index 556f44c9c454b2519971671943d6aa3540132e41..658046dfe02d7210501ee9f0324158b50f6e7858 100644 (file)
@@ -423,7 +423,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        u16 ife_type = 0;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
-       int ret = 0;
+       int ret = 0, exists = 0;
        int err;
 
        err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
@@ -435,25 +435,29 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 
        parm = nla_data(tb[TCA_IFE_PARMS]);
 
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
        if (parm->flags & IFE_ENCODE) {
                /* Until we get issued the ethertype, we cant have
                 * a default..
                **/
                if (!tb[TCA_IFE_TYPE]) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        pr_info("You MUST pass etherype for encoding\n");
                        return -EINVAL;
                }
        }
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife),
                                      bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
-               if (bind)       /* dont override defaults */
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
@@ -495,6 +499,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL);
                if (err) {
 metadata_parse_err:
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(a, bind);
 
index 1464f6a09446bf0e36985d0921c9695b0e84b68d..9f002ada7074ccdb648433b443870562962c8694 100644 (file)
@@ -96,7 +96,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        struct tcf_ipt *ipt;
        struct xt_entry_target *td, *t;
        char *tname;
-       int ret = 0, err;
+       int ret = 0, err, exists = 0;
        u32 hook = 0;
        u32 index = 0;
 
@@ -107,18 +107,23 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        if (err < 0)
                return err;
 
-       if (tb[TCA_IPT_HOOK] == NULL)
-               return -EINVAL;
-       if (tb[TCA_IPT_TARG] == NULL)
+       if (tb[TCA_IPT_INDEX] != NULL)
+               index = nla_get_u32(tb[TCA_IPT_INDEX]);
+
+       exists = tcf_hash_check(tn, index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
+       }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
        if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
                return -EINVAL;
 
-       if (tb[TCA_IPT_INDEX] != NULL)
-               index = nla_get_u32(tb[TCA_IPT_INDEX]);
-
        if (!tcf_hash_check(tn, index, a, bind)) {
                ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
                                      false);
index dea57c1ec90c31f73d3edfde057a92e0693590ab..7737cdb7d5745e0202e79717b1f9d46cd4682b59 100644 (file)
@@ -61,7 +61,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        struct tc_mirred *parm;
        struct tcf_mirred *m;
        struct net_device *dev;
-       int ret, ok_push = 0;
+       int ret, ok_push = 0, exists = 0;
 
        if (nla == NULL)
                return -EINVAL;
@@ -71,17 +71,27 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_MIRRED_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
+
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
        switch (parm->eaction) {
        case TCA_EGRESS_MIRROR:
        case TCA_EGRESS_REDIR:
                break;
        default:
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
        }
        if (parm->ifindex) {
                dev = __dev_get_by_index(net, parm->ifindex);
-               if (dev == NULL)
+               if (dev == NULL) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        return -ENODEV;
+               }
                switch (dev->type) {
                case ARPHRD_TUNNEL:
                case ARPHRD_TUNNEL6:
@@ -99,7 +109,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                dev = NULL;
        }
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                if (dev == NULL)
                        return -EINVAL;
                ret = tcf_hash_create(tn, parm->index, est, a,
@@ -108,9 +118,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        return ret;
                ret = ACT_P_CREATED;
        } else {
-               if (bind)
-                       return 0;
-
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index 2057fd56d74c401edc8a43257e5144c4375a5495..e42f8daca14796f4efd5175e2b298e9a70c2f2c9 100644 (file)
@@ -87,7 +87,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        struct tc_defact *parm;
        struct tcf_defact *d;
        char *defdata;
-       int ret = 0, err;
+       int ret = 0, err, exists = 0;
 
        if (nla == NULL)
                return -EINVAL;
@@ -99,13 +99,21 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_DEF_PARMS] == NULL)
                return -EINVAL;
 
-       if (tb[TCA_DEF_DATA] == NULL)
-               return -EINVAL;
 
        parm = nla_data(tb[TCA_DEF_PARMS]);
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (tb[TCA_DEF_DATA] == NULL) {
+               if (exists)
+                       tcf_hash_release(a, bind);
+               return -EINVAL;
+       }
+
        defdata = nla_data(tb[TCA_DEF_DATA]);
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      sizeof(*d), bind, false);
                if (ret)
@@ -122,8 +130,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        } else {
                d = to_defact(a);
 
-               if (bind)
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index 51b24998904f68764c5232ec8c6b7d5a0b053653..e928802966bce46d02658102acaee9f717d0f3a6 100644 (file)
@@ -69,7 +69,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL;
        u16 *queue_mapping = NULL;
-       int ret = 0, err;
+       int ret = 0, err, exists = 0;
 
        if (nla == NULL)
                return -EINVAL;
@@ -96,12 +96,18 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                mark = nla_data(tb[TCA_SKBEDIT_MARK]);
        }
 
-       if (!flags)
-               return -EINVAL;
-
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (!flags) {
+               tcf_hash_release(a, bind);
+               return -EINVAL;
+       }
+
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      sizeof(*d), bind, false);
                if (ret)
@@ -111,8 +117,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(a);
-               if (bind)
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index c1682ab9bc7ec342ea8a6322eca0595bdfaba5e4..ac4adc812c12eef8f565c1fd773b2885c647ba24 100644 (file)
@@ -77,7 +77,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        int action;
        __be16 push_vid = 0;
        __be16 push_proto = 0;
-       int ret = 0;
+       int ret = 0, exists = 0;
        int err;
 
        if (!nla)
@@ -90,15 +90,25 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_VLAN_PARMS])
                return -EINVAL;
        parm = nla_data(tb[TCA_VLAN_PARMS]);
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
        switch (parm->v_action) {
        case TCA_VLAN_ACT_POP:
                break;
        case TCA_VLAN_ACT_PUSH:
-               if (!tb[TCA_VLAN_PUSH_VLAN_ID])
+               if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        return -EINVAL;
+               }
                push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
-               if (push_vid >= VLAN_VID_MASK)
+               if (push_vid >= VLAN_VID_MASK) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        return -ERANGE;
+               }
 
                if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
                        push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
@@ -114,11 +124,13 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                }
                break;
        default:
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
        }
        action = parm->v_action;
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      sizeof(*v), bind, false);
                if (ret)
@@ -126,8 +138,6 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 
                ret = ACT_P_CREATED;
        } else {
-               if (bind)
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index 2181ffc76638035a93c077e7395d93892e82570e..730aacafc22d8638ccebce757cc5b6f8968bd39f 100644 (file)
@@ -210,6 +210,25 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
        dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
 }
 
+static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
+{
+       struct net_device *dev = tp->q->dev_queue->dev;
+       struct tc_cls_flower_offload offload = {0};
+       struct tc_to_netdev tc;
+
+       if (!tc_should_offload(dev, 0))
+               return;
+
+       offload.command = TC_CLSFLOWER_STATS;
+       offload.cookie = (unsigned long)f;
+       offload.exts = &f->exts;
+
+       tc.type = TC_SETUP_CLSFLOWER;
+       tc.cls_flower = &offload;
+
+       dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -662,6 +681,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        goto nla_put_failure;
        }
 
+       fl_hw_update_stats(tp, f);
+
        if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
                            mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
                            sizeof(key->eth.dst)) ||
index e64877a3c084339de22c08270d4cdb3a496b5c6d..079b43b3c5d24dc5eaf7bc5e7b7d0d4d0c672ca2 100644 (file)
@@ -134,6 +134,11 @@ next_knode:
                j = 0;
 #endif
 
+               if (tc_skip_sw(n->flags)) {
+                       n = rcu_dereference_bh(n->next);
+                       goto next_knode;
+               }
+
 #ifdef CONFIG_CLS_U32_MARK
                if ((skb->mark & n->mask) != n->val) {
                        n = rcu_dereference_bh(n->next);
@@ -443,13 +448,14 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
        }
 }
 
-static void u32_replace_hw_hnode(struct tcf_proto *tp,
+static int u32_replace_hw_hnode(struct tcf_proto *tp,
                                 struct tc_u_hnode *h,
                                 u32 flags)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_u32_offload u32_offload = {0};
        struct tc_to_netdev offload;
+       int err;
 
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
@@ -460,9 +466,13 @@ static void u32_replace_hw_hnode(struct tcf_proto *tp,
                offload.cls_u32->hnode.handle = h->handle;
                offload.cls_u32->hnode.prio = h->prio;
 
-               dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                             tp->protocol, &offload);
+               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                                   tp->protocol, &offload);
+               if (tc_skip_sw(flags))
+                       return err;
        }
+
+       return 0;
 }
 
 static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
@@ -485,13 +495,14 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
        }
 }
 
-static void u32_replace_hw_knode(struct tcf_proto *tp,
+static int u32_replace_hw_knode(struct tcf_proto *tp,
                                 struct tc_u_knode *n,
                                 u32 flags)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_u32_offload u32_offload = {0};
        struct tc_to_netdev offload;
+       int err;
 
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
@@ -512,9 +523,13 @@ static void u32_replace_hw_knode(struct tcf_proto *tp,
                if (n->ht_down)
                        offload.cls_u32->knode.link_handle = n->ht_down->handle;
 
-               dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                             tp->protocol, &offload);
+               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                                   tp->protocol, &offload);
+               if (tc_skip_sw(flags))
+                       return err;
        }
+
+       return 0;
 }
 
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
@@ -845,8 +860,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        if (err < 0)
                return err;
 
-       if (tb[TCA_U32_FLAGS])
+       if (tb[TCA_U32_FLAGS]) {
                flags = nla_get_u32(tb[TCA_U32_FLAGS]);
+               if (!tc_flags_valid(flags))
+                       return err;
+       }
 
        n = (struct tc_u_knode *)*arg;
        if (n) {
@@ -871,10 +889,15 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                        return err;
                }
 
+               err = u32_replace_hw_knode(tp, new, flags);
+               if (err) {
+                       u32_destroy_key(tp, new, false);
+                       return err;
+               }
+
                u32_replace_knode(tp, tp_c, new);
                tcf_unbind_filter(tp, &n->res);
                call_rcu(&n->rcu, u32_delete_key_rcu);
-               u32_replace_hw_knode(tp, new, flags);
                return 0;
        }
 
@@ -978,6 +1001,10 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                struct tc_u_knode __rcu **ins;
                struct tc_u_knode *pins;
 
+               err = u32_replace_hw_knode(tp, n, flags);
+               if (err)
+                       goto errhw;
+
                ins = &ht->ht[TC_U32_HASH(handle)];
                for (pins = rtnl_dereference(*ins); pins;
                     ins = &pins->next, pins = rtnl_dereference(*ins))
@@ -986,11 +1013,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 
                RCU_INIT_POINTER(n->next, pins);
                rcu_assign_pointer(*ins, n);
-               u32_replace_hw_knode(tp, n, flags);
                *arg = (unsigned long)n;
                return 0;
        }
 
+errhw:
 #ifdef CONFIG_CLS_U32_MARK
        free_percpu(n->pcpu_success);
 errout:
index 86a47e17cfaf7cba058a0ed49512954789b9d9a0..651fa201a570f7c3c5bfa30091829650e56ad246 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config NET_SWITCHDEV
-       bool "Switch (and switch-ish) device support (EXPERIMENTAL)"
+       bool "Switch (and switch-ish) device support"
        depends on INET
        ---help---
          This module provides glue between core networking code and device
index 7ecd04c21360994627f7b9ec0231bf454510fd46..997ff7b2509b49a3da6d3183fe65512f8f4caa38 100644 (file)
@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
 
        memset(&theirs, 0, sizeof(theirs));
        memcpy(new, ours, sizeof(*new));
+       memset(dte, 0, sizeof(*dte));
 
        len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
        if (len < 0)
index 64e0d1d81ca5afd66625669079bdce05fa23dc63..9739fce9e032c3a2126a75b1148c21c5e3502bdd 100644 (file)
@@ -139,14 +139,6 @@ static int reconfig_codec(struct hda_codec *codec)
                goto error;
        }
        err = snd_hda_codec_configure(codec);
-       if (err < 0)
-               goto error;
-       /* rebuild PCMs */
-       err = snd_hda_codec_build_pcms(codec);
-       if (err < 0)
-               goto error;
-       /* rebuild mixers */
-       err = snd_hda_codec_build_controls(codec);
        if (err < 0)
                goto error;
        err = snd_card_register(codec->card);
index 1483f85999ecd82d1b9215f5a634011819fd368a..a010d704e0e20b066d912349088cfe0a27de0c31 100644 (file)
@@ -3401,6 +3401,9 @@ static int patch_atihdmi(struct hda_codec *codec)
        spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
        spec->ops.setup_stream = atihdmi_setup_stream;
 
+       spec->chmap.ops.pin_get_slot_channel = atihdmi_pin_get_slot_channel;
+       spec->chmap.ops.pin_set_slot_channel = atihdmi_pin_set_slot_channel;
+
        if (!has_amd_full_remap_support(codec)) {
                /* override to ATI/AMD-specific versions with pairwise mapping */
                spec->chmap.ops.chmap_cea_alloc_validate_get_type =
@@ -3408,10 +3411,6 @@ static int patch_atihdmi(struct hda_codec *codec)
                spec->chmap.ops.cea_alloc_to_tlv_chmap =
                                atihdmi_paired_cea_alloc_to_tlv_chmap;
                spec->chmap.ops.chmap_validate = atihdmi_paired_chmap_validate;
-               spec->chmap.ops.pin_get_slot_channel =
-                               atihdmi_pin_get_slot_channel;
-               spec->chmap.ops.pin_set_slot_channel =
-                               atihdmi_pin_set_slot_channel;
        }
 
        /* ATI/AMD converters do not advertise all of their capabilities */
index ac4490a968638ff7eed3b4007ccdcda8a7b81cea..4918ffa5ba6829e102519b3a7af700b049d93ae2 100644 (file)
@@ -6426,6 +6426,7 @@ enum {
        ALC668_FIXUP_DELL_DISABLE_AAMIX,
        ALC668_FIXUP_DELL_XPS13,
        ALC662_FIXUP_ASUS_Nx50,
+       ALC668_FIXUP_ASUS_Nx51,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6672,6 +6673,15 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC662_FIXUP_BASS_1A
        },
+       [ALC668_FIXUP_ASUS_Nx51] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       {0x1a, 0x90170151}, /* bass speaker */
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_BASS_CHMAP,
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6694,11 +6704,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+       SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
        SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
        SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
        SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+       SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+       SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
index 0adfd9537cf766bbe130c71c4cc029091b2d4118..6adde457b602e08aedd1806e8b79863d65e006cc 100644 (file)
@@ -1137,8 +1137,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+       case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+       case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
                return true;
        }
index 6b7707270aa3b19791c8b6248f90ecddeabc1fdd..9f878619077aeb9ecbb50f87b381706d083def37 100644 (file)
@@ -30,6 +30,7 @@ endef
 FEATURE_TESTS_BASIC :=                 \
        backtrace                       \
        dwarf                           \
+       dwarf_getlocations              \
        fortify-source                  \
        sync-compare-and-swap           \
        glibc                           \
@@ -78,6 +79,7 @@ endif
 
 FEATURE_DISPLAY ?=                     \
        dwarf                           \
+       dwarf_getlocations              \
        glibc                           \
        gtk2                            \
        libaudit                        \
index c5f4c417428d7099fbe4f487a179b0663f478611..4ae94dbfdab98d5181e18d9d8864a24f942bdc53 100644 (file)
@@ -3,6 +3,7 @@ FILES=                                  \
        test-backtrace.bin              \
        test-bionic.bin                 \
        test-dwarf.bin                  \
+       test-dwarf_getlocations.bin     \
        test-fortify-source.bin         \
        test-sync-compare-and-swap.bin  \
        test-glibc.bin                  \
@@ -82,6 +83,9 @@ endif
 $(OUTPUT)test-dwarf.bin:
        $(BUILD) $(DWARFLIBS)
 
+$(OUTPUT)test-dwarf_getlocations.bin:
+       $(BUILD) $(DWARFLIBS)
+
 $(OUTPUT)test-libelf-mmap.bin:
        $(BUILD) -lelf
 
index e499a36c1e4a9e21e9c355309b53a7dc5901664a..a282e8cb84f308da358983ebccbf80c612e7d061 100644 (file)
 # include "test-dwarf.c"
 #undef main
 
+#define main main_test_dwarf_getlocations
+# include "test-dwarf_getlocations.c"
+#undef main
+
 #define main main_test_libelf_getphdrnum
 # include "test-libelf-getphdrnum.c"
 #undef main
@@ -143,6 +147,7 @@ int main(int argc, char *argv[])
        main_test_libelf_mmap();
        main_test_glibc();
        main_test_dwarf();
+       main_test_dwarf_getlocations();
        main_test_libelf_getphdrnum();
        main_test_libunwind();
        main_test_libaudit();
diff --git a/tools/build/feature/test-dwarf_getlocations.c b/tools/build/feature/test-dwarf_getlocations.c
new file mode 100644 (file)
index 0000000..7016269
--- /dev/null
@@ -0,0 +1,12 @@
+#include <stdlib.h>
+#include <elfutils/libdw.h>
+
+int main(void)
+{
+       Dwarf_Addr base, start, end;
+       Dwarf_Attribute attr;
+       Dwarf_Op *op;
+        size_t nops;
+       ptrdiff_t offset = 0;
+        return (int)dwarf_getlocations(&attr, offset, &base, &start, &end, &op, &nops);
+}
index 0144b3d1bb77ac63441c77e1bb24e3405cfe3d8f..88cccea3ca9910314bf8024545c76620051fa263 100644 (file)
@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
                current_op = current_exp;
 
        ret = collapse_tree(current_op, parg, error_str);
+       /* collapse_tree() may free current_op, and updates parg accordingly */
+       current_op = NULL;
        if (ret < 0)
                goto fail;
 
-       *parg = current_op;
-
        free(token);
        return 0;
 
index 9223c164e545d869267b9b7a17d409b774dd7904..1f86ee8fb831c99e8d22ead64ca33ec11a816985 100644 (file)
@@ -63,6 +63,8 @@ struct pt_regs_offset {
 # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
 #endif
 
+/* TODO: switching by dwarf address size */
+#ifndef __x86_64__
 static const struct pt_regs_offset x86_32_regoffset_table[] = {
        REG_OFFSET_NAME_32("%ax",       eax),
        REG_OFFSET_NAME_32("%cx",       ecx),
@@ -75,6 +77,8 @@ static const struct pt_regs_offset x86_32_regoffset_table[] = {
        REG_OFFSET_END,
 };
 
+#define regoffset_table x86_32_regoffset_table
+#else
 static const struct pt_regs_offset x86_64_regoffset_table[] = {
        REG_OFFSET_NAME_64("%ax",       rax),
        REG_OFFSET_NAME_64("%dx",       rdx),
@@ -95,11 +99,7 @@ static const struct pt_regs_offset x86_64_regoffset_table[] = {
        REG_OFFSET_END,
 };
 
-/* TODO: switching by dwarf address size */
-#ifdef __x86_64__
 #define regoffset_table x86_64_regoffset_table
-#else
-#define regoffset_table x86_32_regoffset_table
 #endif
 
 /* Minus 1 for the ending REG_OFFSET_END */
index 3770c3dffe5e141e6c3af2736b9a3e0d41c81cc4..52826696c8528d6f17773b9d4359ea7053cc7e0e 100644 (file)
@@ -1415,21 +1415,19 @@ static int is_directory(const char *base_path, const struct dirent *dent)
        return S_ISDIR(st.st_mode);
 }
 
-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
-       while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) &&     \
-              lang_next)                                               \
-               if ((lang_dirent.d_type == DT_DIR ||                    \
-                    (lang_dirent.d_type == DT_UNKNOWN &&               \
-                     is_directory(scripts_path, &lang_dirent))) &&     \
-                   (strcmp(lang_dirent.d_name, ".")) &&                \
-                   (strcmp(lang_dirent.d_name, "..")))
-
-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
-       while (!readdir_r(lang_dir, &script_dirent, &script_next) &&    \
-              script_next)                                             \
-               if (script_dirent.d_type != DT_DIR &&                   \
-                   (script_dirent.d_type != DT_UNKNOWN ||              \
-                    !is_directory(lang_path, &script_dirent)))
+#define for_each_lang(scripts_path, scripts_dir, lang_dirent)          \
+       while ((lang_dirent = readdir(scripts_dir)) != NULL)            \
+               if ((lang_dirent->d_type == DT_DIR ||                   \
+                    (lang_dirent->d_type == DT_UNKNOWN &&              \
+                     is_directory(scripts_path, lang_dirent))) &&      \
+                   (strcmp(lang_dirent->d_name, ".")) &&               \
+                   (strcmp(lang_dirent->d_name, "..")))
+
+#define for_each_script(lang_path, lang_dir, script_dirent)            \
+       while ((script_dirent = readdir(lang_dir)) != NULL)             \
+               if (script_dirent->d_type != DT_DIR &&                  \
+                   (script_dirent->d_type != DT_UNKNOWN ||             \
+                    !is_directory(lang_path, script_dirent)))
 
 
 #define RECORD_SUFFIX                  "-record"
@@ -1575,7 +1573,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
                                  const char *s __maybe_unused,
                                  int unset __maybe_unused)
 {
-       struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+       struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN];
        DIR *scripts_dir, *lang_dir;
        char script_path[MAXPATHLEN];
@@ -1590,19 +1588,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
        if (!scripts_dir)
                return -1;
 
-       for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+       for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-                        lang_dirent.d_name);
+                        lang_dirent->d_name);
                lang_dir = opendir(lang_path);
                if (!lang_dir)
                        continue;
 
-               for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-                       script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+               for_each_script(lang_path, lang_dir, script_dirent) {
+                       script_root = get_script_root(script_dirent, REPORT_SUFFIX);
                        if (script_root) {
                                desc = script_desc__findnew(script_root);
                                snprintf(script_path, MAXPATHLEN, "%s/%s",
-                                        lang_path, script_dirent.d_name);
+                                        lang_path, script_dirent->d_name);
                                read_script_info(desc, script_path);
                                free(script_root);
                        }
@@ -1690,7 +1688,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
  */
 int find_scripts(char **scripts_array, char **scripts_path_array)
 {
-       struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+       struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
        DIR *scripts_dir, *lang_dir;
        struct perf_session *session;
@@ -1713,9 +1711,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
                return -1;
        }
 
-       for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+       for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
-                        lang_dirent.d_name);
+                        lang_dirent->d_name);
 #ifdef NO_LIBPERL
                if (strstr(lang_path, "perl"))
                        continue;
@@ -1729,16 +1727,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
                if (!lang_dir)
                        continue;
 
-               for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+               for_each_script(lang_path, lang_dir, script_dirent) {
                        /* Skip those real time scripts: xxxtop.p[yl] */
-                       if (strstr(script_dirent.d_name, "top."))
+                       if (strstr(script_dirent->d_name, "top."))
                                continue;
                        sprintf(scripts_path_array[i], "%s/%s", lang_path,
-                               script_dirent.d_name);
-                       temp = strchr(script_dirent.d_name, '.');
+                               script_dirent->d_name);
+                       temp = strchr(script_dirent->d_name, '.');
                        snprintf(scripts_array[i],
-                               (temp - script_dirent.d_name) + 1,
-                               "%s", script_dirent.d_name);
+                               (temp - script_dirent->d_name) + 1,
+                               "%s", script_dirent->d_name);
 
                        if (check_ev_match(lang_path,
                                        scripts_array[i], session))
@@ -1756,7 +1754,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
 
 static char *get_script_path(const char *script_root, const char *suffix)
 {
-       struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+       struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN];
        char script_path[MAXPATHLEN];
        DIR *scripts_dir, *lang_dir;
@@ -1769,21 +1767,21 @@ static char *get_script_path(const char *script_root, const char *suffix)
        if (!scripts_dir)
                return NULL;
 
-       for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+       for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-                        lang_dirent.d_name);
+                        lang_dirent->d_name);
                lang_dir = opendir(lang_path);
                if (!lang_dir)
                        continue;
 
-               for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-                       __script_root = get_script_root(&script_dirent, suffix);
+               for_each_script(lang_path, lang_dir, script_dirent) {
+                       __script_root = get_script_root(script_dirent, suffix);
                        if (__script_root && !strcmp(script_root, __script_root)) {
                                free(__script_root);
                                closedir(lang_dir);
                                closedir(scripts_dir);
                                snprintf(script_path, MAXPATHLEN, "%s/%s",
-                                        lang_path, script_dirent.d_name);
+                                        lang_path, script_dirent->d_name);
                                return strdup(script_path);
                        }
                        free(__script_root);
index 1f19f2f999c841b9da140e10bcaf5e6e0f41ee6b..307e8a1a003c5ebfd7eefe6a8770a868cd3b55e9 100644 (file)
@@ -528,6 +528,7 @@ static int __run_perf_stat(int argc, const char **argv)
                perf_evlist__set_leader(evsel_list);
 
        evlist__for_each(evsel_list, counter) {
+try_again:
                if (create_perf_stat_counter(counter) < 0) {
                        /*
                         * PPC returns ENXIO for HW counters until 2.6.37
@@ -544,7 +545,11 @@ static int __run_perf_stat(int argc, const char **argv)
                                if ((counter->leader != counter) ||
                                    !(counter->leader->nr_members > 1))
                                        continue;
-                       }
+                       } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+                                if (verbose)
+                                        ui__warning("%s\n", msg);
+                                goto try_again;
+                        }
 
                        perf_evsel__open_strerror(counter, &target,
                                                  errno, msg, sizeof(msg));
index f7d7f5a1cad538e44be9400b520ca32315eed999..6f8f6430f2bf6be5ef09565cabedf33ed8963a7b 100644 (file)
@@ -268,6 +268,12 @@ else
     ifneq ($(feature-dwarf), 1)
       msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
       NO_DWARF := 1
+    else
+      ifneq ($(feature-dwarf_getlocations), 1)
+        msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157);
+      else
+        CFLAGS += -DHAVE_DWARF_GETLOCATIONS
+      endif # dwarf_getlocations
     endif # Dwarf support
   endif # libelf support
 endif # NO_LIBELF
index 577e600c8eb15a66cb50580b0c6361060147894a..aea189b41cc8c43f8ce325c4fde2f8ae27507df3 100644 (file)
@@ -959,6 +959,7 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
        return 0;
 }
 
+#ifdef HAVE_DWARF_GETLOCATIONS
 /**
  * die_get_var_innermost_scope - Get innermost scope range of given variable DIE
  * @sp_die: a subprogram DIE
@@ -1080,3 +1081,11 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
 
        return ret;
 }
+#else
+int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
+                     Dwarf_Die *vr_die __maybe_unused,
+                     struct strbuf *buf __maybe_unused)
+{
+       return -ENOTSUP;
+}
+#endif
index dad55d04ffdd5074c212fac7dbcd306444f008e6..edcf4ed4e99c8891e8990b71389641b5e14ae9ef 100644 (file)
@@ -433,7 +433,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 {
        char filename[PATH_MAX];
        DIR *tasks;
-       struct dirent dirent, *next;
+       struct dirent *dirent;
        pid_t tgid, ppid;
        int rc = 0;
 
@@ -462,11 +462,11 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                return 0;
        }
 
-       while (!readdir_r(tasks, &dirent, &next) && next) {
+       while ((dirent = readdir(tasks)) != NULL) {
                char *end;
                pid_t _pid;
 
-               _pid = strtol(dirent.d_name, &end, 10);
+               _pid = strtol(dirent->d_name, &end, 10);
                if (*end)
                        continue;
 
@@ -575,7 +575,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 {
        DIR *proc;
        char proc_path[PATH_MAX];
-       struct dirent dirent, *next;
+       struct dirent *dirent;
        union perf_event *comm_event, *mmap_event, *fork_event;
        int err = -1;
 
@@ -600,9 +600,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
        if (proc == NULL)
                goto out_free_fork;
 
-       while (!readdir_r(proc, &dirent, &next) && next) {
+       while ((dirent = readdir(proc)) != NULL) {
                char *end;
-               pid_t pid = strtol(dirent.d_name, &end, 10);
+               pid_t pid = strtol(dirent->d_name, &end, 10);
 
                if (*end) /* only interested in proper numerical dirents */
                        continue;
index 738ce226002b8a0e88093fefaf74e9fe6093a13a..645dc18288367733567b760f136b81657a5937ef 100644 (file)
@@ -2345,6 +2345,8 @@ out:
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
 {
+       int paranoid;
+
        if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
            evsel->attr.type   == PERF_TYPE_HARDWARE &&
            evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
@@ -2363,6 +2365,22 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
 
                zfree(&evsel->name);
+               return true;
+       } else if (err == EACCES && !evsel->attr.exclude_kernel &&
+                  (paranoid = perf_event_paranoid()) > 1) {
+               const char *name = perf_evsel__name(evsel);
+               char *new_name;
+
+               if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
+                       return false;
+
+               if (evsel->name)
+                       free(evsel->name);
+               evsel->name = new_name;
+               scnprintf(msg, msgsize,
+"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
+               evsel->attr.exclude_kernel = 1;
+
                return true;
        }
 
@@ -2382,12 +2400,13 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
                 "which controls use of the performance events system by\n"
                 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
-                "The default value is 1:\n\n"
+                "The current value is %d:\n\n"
                 "  -1: Allow use of (almost) all events by all users\n"
                 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
                 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
                 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
-                                target->system_wide ? "system-wide " : "");
+                                target->system_wide ? "system-wide " : "",
+                                perf_event_paranoid());
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",
                                 perf_evsel__name(evsel));
index 4c19d5e79d8c4d626eb3fa91486cc1d83447aeeb..bcbc983d4b12215dc1045fb0542597f08db38b45 100644 (file)
@@ -138,11 +138,11 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
 #define PERF_EVENT_TYPE(config)                __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)          __PERF_EVENT_FIELD(config, EVENT)
 
-#define for_each_subsystem(sys_dir, sys_dirent, sys_next)             \
-       while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)        \
-       if (sys_dirent.d_type == DT_DIR &&                                     \
-          (strcmp(sys_dirent.d_name, ".")) &&                                 \
-          (strcmp(sys_dirent.d_name, "..")))
+#define for_each_subsystem(sys_dir, sys_dirent)                        \
+       while ((sys_dirent = readdir(sys_dir)) != NULL)         \
+               if (sys_dirent->d_type == DT_DIR &&             \
+                   (strcmp(sys_dirent->d_name, ".")) &&        \
+                   (strcmp(sys_dirent->d_name, "..")))
 
 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
 {
@@ -159,12 +159,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
        return 0;
 }
 
-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)             \
-       while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
-       if (evt_dirent.d_type == DT_DIR &&                                     \
-          (strcmp(evt_dirent.d_name, ".")) &&                                 \
-          (strcmp(evt_dirent.d_name, "..")) &&                                \
-          (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+#define for_each_event(sys_dirent, evt_dir, evt_dirent)                \
+       while ((evt_dirent = readdir(evt_dir)) != NULL)         \
+               if (evt_dirent->d_type == DT_DIR &&             \
+                   (strcmp(evt_dirent->d_name, ".")) &&        \
+                   (strcmp(evt_dirent->d_name, "..")) &&       \
+                   (!tp_event_has_id(sys_dirent, evt_dirent)))
 
 #define MAX_EVENT_LENGTH 512
 
@@ -173,7 +173,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
 {
        struct tracepoint_path *path = NULL;
        DIR *sys_dir, *evt_dir;
-       struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+       struct dirent *sys_dirent, *evt_dirent;
        char id_buf[24];
        int fd;
        u64 id;
@@ -184,18 +184,18 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
        if (!sys_dir)
                return NULL;
 
-       for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+       for_each_subsystem(sys_dir, sys_dirent) {
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent.d_name);
+                        sys_dirent->d_name);
                evt_dir = opendir(dir_path);
                if (!evt_dir)
                        continue;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+               for_each_event(sys_dirent, evt_dir, evt_dirent) {
 
                        snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
-                                evt_dirent.d_name);
+                                evt_dirent->d_name);
                        fd = open(evt_path, O_RDONLY);
                        if (fd < 0)
                                continue;
@@ -220,9 +220,9 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                                        free(path);
                                        return NULL;
                                }
-                               strncpy(path->system, sys_dirent.d_name,
+                               strncpy(path->system, sys_dirent->d_name,
                                        MAX_EVENT_LENGTH);
-                               strncpy(path->name, evt_dirent.d_name,
+                               strncpy(path->name, evt_dirent->d_name,
                                        MAX_EVENT_LENGTH);
                                return path;
                        }
@@ -1812,7 +1812,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                             bool name_only)
 {
        DIR *sys_dir, *evt_dir;
-       struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+       struct dirent *sys_dirent, *evt_dirent;
        char evt_path[MAXPATHLEN];
        char dir_path[MAXPATHLEN];
        char **evt_list = NULL;
@@ -1830,20 +1830,20 @@ restart:
                        goto out_close_sys_dir;
        }
 
-       for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+       for_each_subsystem(sys_dir, sys_dirent) {
                if (subsys_glob != NULL &&
-                   !strglobmatch(sys_dirent.d_name, subsys_glob))
+                   !strglobmatch(sys_dirent->d_name, subsys_glob))
                        continue;
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent.d_name);
+                        sys_dirent->d_name);
                evt_dir = opendir(dir_path);
                if (!evt_dir)
                        continue;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+               for_each_event(sys_dirent, evt_dir, evt_dirent) {
                        if (event_glob != NULL &&
-                           !strglobmatch(evt_dirent.d_name, event_glob))
+                           !strglobmatch(evt_dirent->d_name, event_glob))
                                continue;
 
                        if (!evt_num_known) {
@@ -1852,7 +1852,7 @@ restart:
                        }
 
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
-                                sys_dirent.d_name, evt_dirent.d_name);
+                                sys_dirent->d_name, evt_dirent->d_name);
 
                        evt_list[evt_i] = strdup(evt_path);
                        if (evt_list[evt_i] == NULL)
@@ -1905,7 +1905,7 @@ out_close_sys_dir:
 int is_valid_tracepoint(const char *event_string)
 {
        DIR *sys_dir, *evt_dir;
-       struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+       struct dirent *sys_dirent, *evt_dirent;
        char evt_path[MAXPATHLEN];
        char dir_path[MAXPATHLEN];
 
@@ -1913,17 +1913,17 @@ int is_valid_tracepoint(const char *event_string)
        if (!sys_dir)
                return 0;
 
-       for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+       for_each_subsystem(sys_dir, sys_dirent) {
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent.d_name);
+                        sys_dirent->d_name);
                evt_dir = opendir(dir_path);
                if (!evt_dir)
                        continue;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+               for_each_event(sys_dirent, evt_dir, evt_dirent) {
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
-                                sys_dirent.d_name, evt_dirent.d_name);
+                                sys_dirent->d_name, evt_dirent->d_name);
                        if (!strcmp(evt_path, event_string)) {
                                closedir(evt_dir);
                                closedir(sys_dir);
index 47966a1618c7310108a42de59327930e6acdb184..f5ba111cd9fb2a00757a36af8d706a3f4380c92e 100644 (file)
@@ -2445,6 +2445,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
 
 static char *setup_overhead(char *keys)
 {
+       if (sort__mode == SORT_MODE__DIFF)
+               return keys;
+
        keys = prefix_if_not_in("overhead", keys);
 
        if (symbol_conf.cumulate_callchain)
index 08afc69099538f66172968dc3827fd9b7b40d5c2..267112b4e3dbe9d291cb58b75d50428b177599a8 100644 (file)
@@ -94,7 +94,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
        DIR *proc;
        int max_threads = 32, items, i;
        char path[256];
-       struct dirent dirent, *next, **namelist = NULL;
+       struct dirent *dirent, **namelist = NULL;
        struct thread_map *threads = thread_map__alloc(max_threads);
 
        if (threads == NULL)
@@ -107,16 +107,16 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
        threads->nr = 0;
        atomic_set(&threads->refcnt, 1);
 
-       while (!readdir_r(proc, &dirent, &next) && next) {
+       while ((dirent = readdir(proc)) != NULL) {
                char *end;
                bool grow = false;
                struct stat st;
-               pid_t pid = strtol(dirent.d_name, &end, 10);
+               pid_t pid = strtol(dirent->d_name, &end, 10);
 
                if (*end) /* only interested in proper numerical dirents */
                        continue;
 
-               snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+               snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
 
                if (stat(path, &st) != 0)
                        continue;