Merge ath-next from ath.git
author     Kalle Valo <kvalo@codeaurora.org>
           Wed, 26 Aug 2015 09:40:23 +0000 (12:40 +0300)
committer  Kalle Valo <kvalo@codeaurora.org>
           Wed, 26 Aug 2015 09:40:23 +0000 (12:40 +0300)
Major changes in ath10k:

* add spectral scan support for qca99x0
* add qca6164 support

915 files changed:
.mailmap
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/st-nci.txt [deleted file]
Documentation/devicetree/bindings/phy/ti-phy.txt
Documentation/input/alps.txt
Documentation/networking/6lowpan.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
Documentation/networking/switchdev.txt
Documentation/networking/vxlan.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/atomic.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/spinlock_types.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/setup.c
arch/arc/kernel/time.c
arch/arc/lib/memcpy-archs.S
arch/arc/lib/memset-archs.S
arch/arc/plat-axs10x/axs10x.c
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/exynos3250.dtsi
arch/arm/boot/dts/exynos4210-origen.dts
arch/arm/boot/dts/exynos4210-trats.dts
arch/arm/boot/dts/exynos4210-universal_c210.dts
arch/arm/boot/dts/exynos4210.dtsi
arch/arm/boot/dts/imx35.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/k2e-clocks.dtsi
arch/arm/boot/dts/k2e.dtsi
arch/arm/boot/dts/k2hk-clocks.dtsi
arch/arm/boot/dts/k2hk.dtsi
arch/arm/boot/dts/k2l-clocks.dtsi
arch/arm/boot/dts/k2l.dtsi
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/ste-nomadik-nhk15.dts
arch/arm/boot/dts/ste-nomadik-s8815.dts
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/vdso.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/vdso/Makefile
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/kernel/signal32.c
arch/arm64/kernel/vdso.c
arch/mips/Kconfig
arch/mips/ath79/setup.c
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/mach-bcm63xx/dma-coherence.h [deleted file]
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/stackframe.h
arch/mips/kernel/mips-mt-fpaff.c
arch/mips/kernel/prom.c
arch/mips/kernel/relocate_kernel.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/signal32.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/lantiq/irq.c
arch/mips/loongson64/loongson-3/smp.c
arch/mips/mm/cache.c
arch/mips/mm/fault.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-time.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/netlogic/common/smp.c
arch/mips/paravirt/paravirt-smp.c
arch/mips/pistachio/time.c
arch/mips/pmcs-msp71xx/msp_smp.c
arch/mips/ralink/irq.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sibyte/bcm1480/smp.c
arch/mips/sibyte/sb1250/smp.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/visasm.h
arch/sparc/lib/NG4memcpy.S
arch/sparc/lib/VISsave.S
arch/sparc/lib/ksyms.c
arch/tile/kernel/compat_signal.c
arch/x86/boot/compressed/eboot.c
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/desc.h
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/sigcontext.h
arch/x86/include/uapi/asm/sigcontext.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/ldt.c
arch/x86/kernel/process_64.c
arch/x86/kernel/signal.c
arch/x86/kernel/step.c
arch/x86/kvm/mtrr.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_entry.c
arch/x86/math-emu/fpu_system.h
arch/x86/math-emu/get_address.c
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/efi.c
arch/x86/power/cpu.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/xen-ops.h
block/blk-settings.c
crypto/authencesn.c
drivers/acpi/video_detect.c
drivers/ata/ahci_brcmstb.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/sata_sx4.c
drivers/base/property.c
drivers/base/regmap/regcache-rbtree.c
drivers/bcma/Kconfig
drivers/bcma/driver_gpio.c
drivers/block/rbd.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/Kconfig
drivers/bluetooth/Makefile
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btqca.c [new file with mode: 0644]
drivers/bluetooth/btqca.h [new file with mode: 0644]
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_qca.c [new file with mode: 0644]
drivers/bluetooth/hci_uart.h
drivers/char/hw_random/core.c
drivers/clk/pxa/clk-pxa3xx.c
drivers/clocksource/sh_cmt.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/caam/caamhash.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/at_xdmac.c
drivers/dma/dmaengine.c
drivers/dma/mv_xor.c
drivers/dma/pl330.c
drivers/dma/virt-dma.c
drivers/dma/virt-dma.h
drivers/dma/xgene-dma.c
drivers/edac/ppc4xx_edac.c
drivers/extcon/extcon-palmas.c
drivers/extcon/extcon.c
drivers/firmware/efi/efi.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/hid/hid-input.c
drivers/hid/hid-uclogic.c
drivers/hid/wacom_sys.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/g762.c
drivers/hwmon/nct7904.c
drivers/i2c/busses/i2c-bfin-twi.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-slave-eeprom.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/input/joystick/turbografx.c
drivers/input/misc/axp20x-pek.c
drivers/input/misc/twl4030-vibra.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/irqchip/irq-mips-gic.c
drivers/isdn/mISDN/dsp_audio.c
drivers/macintosh/ans-lcd.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-policy-smq.c
drivers/md/dm-thin-metadata.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/memory/omap-gpmc.c
drivers/mfd/Kconfig
drivers/mfd/arizona-core.c
drivers/misc/eeprom/at24.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/can/flexcan.c
drivers/net/can/usb/gs_usb.c
drivers/net/dsa/mv88e6171.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/dummy.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdmahw.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_cq.c
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/cisco/enic/vnic_dev.h
drivers/net/ethernet/cisco/enic/vnic_devcmd.h
drivers/net/ethernet/cisco/enic/vnic_intr.c
drivers/net/ethernet/cisco/enic/vnic_resource.h
drivers/net/ethernet/cisco/enic/vnic_rq.c
drivers/net/ethernet/cisco/enic/vnic_wq.c
drivers/net/ethernet/cisco/enic/vnic_wq.h
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/ezchip/nps_enet.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/regs.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_register.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.h
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/item.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/pci.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/fddi/skfp/h/hwmtm.h
drivers/net/geneve.c
drivers/net/hamradio/mkiss.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/loopback.c
drivers/net/nlmon.c
drivers/net/ntb_netdev.c
drivers/net/phy/aquantia.c
drivers/net/phy/marvell.c
drivers/net/phy/phy.c
drivers/net/phy/realtek.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vrf.c [new file with mode: 0644]
drivers/net/vxlan.c
drivers/net/wan/cosa.c
drivers/net/wan/hdlc_fr.c
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/ath/ath5k/ani.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/brcm80211/brcmfmac/flowring.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/tof.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
drivers/net/wireless/rsi/rsi_91x_usb_ops.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2500usb.h
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/ti/wl18xx/acx.h
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/s3fwrn5/Kconfig [new file with mode: 0644]
drivers/nfc/s3fwrn5/Makefile [new file with mode: 0644]
drivers/nfc/s3fwrn5/core.c [new file with mode: 0644]
drivers/nfc/s3fwrn5/firmware.c [new file with mode: 0644]
drivers/nfc/s3fwrn5/firmware.h [new file with mode: 0644]
drivers/nfc/s3fwrn5/i2c.c [new file with mode: 0644]
drivers/nfc/s3fwrn5/nci.c [new file with mode: 0644]
drivers/nfc/s3fwrn5/nci.h [new file with mode: 0644]
drivers/nfc/s3fwrn5/s3fwrn5.h [new file with mode: 0644]
drivers/nfc/st-nci/Kconfig
drivers/nfc/st-nci/Makefile
drivers/nfc/st-nci/i2c.c
drivers/nfc/st-nci/ndlc.c
drivers/nfc/st-nci/spi.c [new file with mode: 0644]
drivers/nfc/st-nci/st-nci_se.c
drivers/nfc/st21nfca/st21nfca.c
drivers/nfc/trf7970a.c
drivers/ntb/ntb.c
drivers/ntb/ntb_transport.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/platform/chrome/Kconfig
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/staging/comedi/drivers/das1800.c
drivers/staging/lustre/lustre/obdclass/debug.c
drivers/staging/vt6655/device_main.c
drivers/staging/wilc1000/linux_mon.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_configfs.c
drivers/target/target_core_hba.c
drivers/target/target_core_spc.c
drivers/thermal/cpu_cooling.c
drivers/thermal/hisi_thermal.c
drivers/thermal/power_allocator.c
drivers/thermal/samsung/Kconfig
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/thermal_core.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/host.c
drivers/usb/chipidea/host.h
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/video/console/fbcon.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/omap2/dss/dss-of.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/of_videomode.c
drivers/virtio/virtio_input.c
drivers/xen/balloon.c
drivers/xen/gntdev.c
drivers/xen/xenbus/xenbus_client.c
fs/btrfs/qgroup.c
fs/ceph/caps.c
fs/ceph/locks.c
fs/ceph/super.h
fs/dcache.c
fs/file_table.c
fs/fuse/dev.c
fs/hugetlbfs/inode.c
fs/namei.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/notify/mark.c
fs/ocfs2/aops.c
fs/ocfs2/dlmglue.c
fs/signalfd.c
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_pciids.h
include/linux/ata.h
include/linux/average.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bpf.h
include/linux/etherdevice.h
include/linux/fs.h
include/linux/ieee80211.h
include/linux/ipv6.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter/nfnetlink_acct.h
include/linux/page-flags.h
include/linux/perf_event.h
include/linux/platform_data/st_nci.h [deleted file]
include/linux/property.h
include/linux/skbuff.h
include/net/6lowpan.h
include/net/bluetooth/hci_core.h
include/net/cfg80211.h
include/net/cfg802154.h
include/net/checksum.h
include/net/dsa.h
include/net/dst.h
include/net/dst_metadata.h
include/net/flow.h
include/net/gre.h
include/net/ip6_fib.h
include/net/ip_tunnels.h
include/net/lwtunnel.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/ipv4/nf_dup_ipv4.h [new file with mode: 0644]
include/net/netfilter/ipv6/nf_dup_ipv6.h [new file with mode: 0644]
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_zones.h
include/net/netfilter/nft_dup.h [new file with mode: 0644]
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/net/nl802154.h
include/net/route.h
include/net/switchdev.h
include/net/vrf.h [new file with mode: 0644]
include/net/vxlan.h
include/net/xfrm.h
include/scsi/scsi_eh.h
include/sound/soc-topology.h
include/uapi/linux/bpf.h
include/uapi/linux/if_link.h
include/uapi/linux/if_packet.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/ila.h [new file with mode: 0644]
include/uapi/linux/ipv6.h
include/uapi/linux/lwtunnel.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/nfnetlink_conntrack.h
include/uapi/linux/netfilter/xt_CT.h
include/uapi/linux/pci_regs.h
include/uapi/linux/rtnetlink.h
include/uapi/sound/asoc.h
init/main.c
ipc/mqueue.c
ipc/sem.c
ipc/shm.c
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/kthread.c
kernel/locking/qspinlock_paravirt.h
kernel/module.c
kernel/signal.c
kernel/trace/bpf_trace.c
lib/Kconfig
lib/average.c [deleted file]
lib/iommu-common.c
lib/test_bpf.c
lib/test_rhashtable.c
mm/cma.h
mm/huge_memory.c
mm/kasan/kasan.c
mm/kasan/report.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c
mm/page-writeback.c
mm/page_alloc.c
mm/shmem.c
mm/slab_common.c
mm/vmscan.c
net/6lowpan/Makefile
net/6lowpan/core.c [new file with mode: 0644]
net/6lowpan/iphc.c
net/8021q/vlan_dev.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/gateway_client.c
net/batman-adv/multicast.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/amp.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_request.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bridge/br_device.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/caif/caif_dev.c
net/core/datagram.c
net/core/dev.c
net/core/dst.c
net/core/filter.c
net/core/lwtunnel.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/pktgen.c
net/core/request_sock.c
net/core/skbuff.c
net/core/utils.c
net/dsa/dsa.c
net/dsa/slave.c
net/ethernet/eth.c
net/hsr/hsr_device.c
net/ieee802154/6lowpan/6lowpan_i.h
net/ieee802154/6lowpan/core.c
net/ieee802154/6lowpan/rx.c
net/ieee802154/6lowpan/tx.c
net/ieee802154/nl802154.c
net/ieee802154/rdev-ops.h
net/ieee802154/trace.h
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/gre_demux.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ipt_ECN.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_dup_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/netfilter/nft_dup_ipv4.c [new file with mode: 0644]
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/ila.c [new file with mode: 0644]
net/ipv6/ip6_fib.c
net/ipv6/mcast_snoop.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_dup_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/netfilter/nf_nat_proto_icmpv6.c
net/ipv6/netfilter/nft_dup_ipv6.c [new file with mode: 0644]
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/aes_cmac.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs_key.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.c [new file with mode: 0644]
net/mac80211/driver-ops.h
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_plink.c
net/mac80211/mesh_ps.c
net/mac80211/mesh_sync.c
net/mac80211/mlme.c
net/mac80211/ocb.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wpa.c
net/mac802154/cfg.c
net/mac802154/iface.c
net/mac802154/main.c
net/mpls/af_mpls.c
net/mpls/mpls_iptunnel.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_nfct.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_proto_dccp.c
net/netfilter/nf_nat_proto_tcp.c
net/netfilter/nf_nat_proto_udp.c
net/netfilter/nf_nat_proto_udplite.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nft_counter.c
net/netfilter/nft_limit.c
net/netfilter/nft_payload.c
net/netfilter/xt_CT.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_TEE.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_nfacct.c
net/netlink/af_netlink.c
net/nfc/nci/core.c
net/nfc/nci/hci.c
net/nfc/netlink.c
net/openvswitch/Kconfig
net/openvswitch/actions.c
net/openvswitch/flow_netlink.c
net/openvswitch/flow_table.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport-netdev.h
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/packet/internal.h
net/rds/bind.c
net/rds/connection.c
net/rds/ib.c
net/rds/ib_cm.c
net/rds/info.c
net/rds/iw.c
net/rds/iw_cm.c
net/rds/rds.h
net/rds/send.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/transport.c
net/rfkill/Kconfig
net/rfkill/rfkill-gpio.c
net/sched/act_connmark.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/sch_fifo.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_htb.c
net/sched/sch_plug.c
net/sched/sch_sfb.c
net/switchdev/switchdev.c
net/tipc/link.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/wimax/op-rfkill.c
net/wireless/core.c
net/wireless/core.h
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/bpf_helpers.h
samples/bpf/tracex6_kern.c [new file with mode: 0644]
samples/bpf/tracex6_user.c [new file with mode: 0644]
scripts/kconfig/streamline_config.pl
security/yama/yama_lsm.c
sound/firewire/amdtp.c
sound/firewire/amdtp.h
sound/firewire/fireworks/fireworks.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_stream.c
sound/hda/ext/hdac_ext_controller.c
sound/hda/ext/hdac_ext_stream.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen_mixer.c
sound/soc/Kconfig
sound/soc/Makefile
sound/soc/codecs/cs4265.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5645.h
sound/soc/intel/baytrail/sst-baytrail-ipc.c
sound/soc/intel/haswell/sst-haswell-ipc.c
sound/soc/soc-topology.c
sound/usb/card.c
tools/perf/config/Makefile
tools/perf/util/stat-shadow.c
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/psock_lib.h

diff --git a/.mailmap b/.mailmap
index b4091b7a78fe11ccd0e5f44f0703ace69dc09707..4b31af54ccd5864359c0810f9733f3026181a631 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8b9907ab5a055a6502180b4350148..91e6e5c478d006245c5a88e7ae7e304d6fa7f097 100644 (file)
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
                            "qcom,kpss-acc-v1"
                            "qcom,kpss-acc-v2"
                            "rockchip,rk3066-smp"
+                           "ste,dbx500-smp"
 
        - cpu-release-addr
                Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23de17e4c9916c2552b1266b9263d56..c53e0b08032fe73a42f130cd790b2b1dfa753939 100644 (file)
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
                        device_type = "dma";
                        reg = <0x0 0x1f270000 0x0 0x10000>,
                              <0x0 0x1f200000 0x0 0x10000>,
-                             <0x0 0x1b008000 0x0 0x2000>,
+                             <0x0 0x1b000000 0x0 0x400000>,
                              <0x0 0x1054a000 0x0 0x100>;
                        interrupts = <0x0 0x82 0x4>,
                                     <0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index f0b4cd72411d66bf12aeec3d64335d6641b26674..04e6bef3ac3ff431560f53456a34d4b932c7eca0 100644 (file)
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -44,9 +44,10 @@ Note that a port labelled "dsa" will imply checking for the uplink phandle
 described below.
 
 Optional property:
-- link                 : Should be a phandle to another switch's DSA port.
+- link                 : Should be a list of phandles to another switch's DSA port.
                          This property is only used when switches are being
-                         chained/cascaded together.
+                         chained/cascaded together. This port is used as outgoing port
+                         towards the phandle port, which can be more than one hop away.
 
 - phy-handle           : Phandle to a PHY on an external MDIO bus, not the
                          switch internal one. See
@@ -58,6 +59,10 @@ Optional property:
                          Documentation/devicetree/bindings/net/ethernet.txt
                          for details.
 
+- mii-bus              : Should be a phandle to a valid MDIO bus device node.
+                         This mii-bus will be used in preference to the
+                         global dsa,mii-bus defined above, for this switch.
+
 Optional subnodes:
 - fixed-link           : Fixed-link subnode describing a link to a non-MDIO
                          managed entity. See
@@ -96,10 +101,11 @@ Example:
                                label = "cpu";
                        };
 
-                       switch0uplink: port@6 {
+                       switch0port6: port@6 {
                                reg = <6>;
                                label = "dsa";
-                               link = <&switch1uplink>;
+                               link = <&switch1port0
+                                       &switch2port0>;
                        };
                };
 
@@ -107,11 +113,31 @@ Example:
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <17 1>;   /* MDIO address 17, switch 1 in tree */
+                       mii-bus = <&mii_bus1>;
+
+                       switch1port0: port@0 {
+                               reg = <0>;
+                               label = "dsa";
+                               link = <&switch0port6>;
+                       };
+                       switch1port1: port@1 {
+                               reg = <1>;
+                               label = "dsa";
+                               link = <&switch2port1>;
+                       };
+               };
+
+               switch@2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <18 2>;   /* MDIO address 18, switch 2 in tree */
+                       mii-bus = <&mii_bus1>;
 
-                       switch1uplink: port@0 {
+                       switch2port0: port@0 {
                                reg = <0>;
                                label = "dsa";
-                               link = <&switch0uplink>;
+                               link = <&switch1port1
+                                       &switch0port6>;
                        };
                };
        };
diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
new file mode 100644 (file)
index 0000000..fb1e75f
--- /dev/null
@@ -0,0 +1,27 @@
+* Samsung S3FWRN5 NCI NFC Controller
+
+Required properties:
+- compatible: Should be "samsung,s3fwrn5-i2c".
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- s3fwrn5,en-gpios: Output GPIO pin used for enabling/disabling the chip
+- s3fwrn5,fw-gpios: Output GPIO pin used to enter firmware mode and
+  sleep/wakeup control
+
+Example:
+
+&hsi2c_4 {
+       status = "okay";
+       s3fwrn5@27 {
+               compatible = "samsung,s3fwrn5-i2c";
+
+               reg = <0x27>;
+
+               interrupt-parent = <&gpa1>;
+               interrupts = <3 0 0>;
+
+               s3fwrn5,en-gpios = <&gpf1 4 0>;
+               s3fwrn5,fw-gpios = <&gpj0 2 0>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
new file mode 100644 (file)
index 0000000..d707588
--- /dev/null
@@ -0,0 +1,33 @@
+* STMicroelectronics SAS. ST NCI NFC Controller
+
+Required properties:
+- compatible: Should be "st,st21nfcb-i2c" or "st,st21nfcc-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- reset-gpios: Output GPIO pin used to reset the ST21NFCB
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
+
+&i2c2 {
+
+       status = "okay";
+
+       st21nfcb: st21nfcb@8 {
+
+               compatible = "st,st21nfcb-i2c";
+
+               reg = <0x08>;
+               clock-frequency = <400000>;
+
+               interrupt-parent = <&gpio5>;
+               interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+
+               reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
new file mode 100644 (file)
index 0000000..525681b
--- /dev/null
@@ -0,0 +1,31 @@
+* STMicroelectronics SAS. ST NCI NFC Controller
+
+Required properties:
+- compatible: Should be "st,st21nfcb-spi"
+- spi-max-frequency: Maximum SPI frequency (<= 10000000).
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- reset-gpios: Output GPIO pin used to reset the ST21NFCB
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST21NFCB on SPI4):
+
+&mcspi4 {
+
+       status = "okay";
+
+       st21nfcb: st21nfcb@0 {
+
+               compatible = "st,st21nfcb-spi";
+
+               clock-frequency = <4000000>;
+
+               interrupt-parent = <&gpio5>;
+               interrupts = <2 IRQ_TYPE_EDGE_RISING>;
+
+               reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci.txt b/Documentation/devicetree/bindings/net/nfc/st-nci.txt
deleted file mode 100644 (file)
index d707588..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-* STMicroelectronics SAS. ST NCI NFC Controller
-
-Required properties:
-- compatible: Should be "st,st21nfcb-i2c" or "st,st21nfcc-i2c".
-- clock-frequency: I²C work frequency.
-- reg: address on the bus
-- interrupt-parent: phandle for the interrupt gpio controller
-- interrupts: GPIO interrupt to which the chip is connected
-- reset-gpios: Output GPIO pin used to reset the ST21NFCB
-
-Optional SoC Specific Properties:
-- pinctrl-names: Contains only one value - "default".
-- pintctrl-0: Specifies the pin control groups used for this controller.
-
-Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
-
-&i2c2 {
-
-       status = "okay";
-
-       st21nfcb: st21nfcb@8 {
-
-               compatible = "st,st21nfcb-i2c";
-
-               reg = <0x08>;
-               clock-frequency = <400000>;
-
-               interrupt-parent = <&gpio5>;
-               interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
-
-               reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1eb9a994c845eb28959275d2f20ed..9cf9446eaf2eac41d57251cb5853037e2b31e7c2 100644 (file)
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
 - id: If there are multiple instances of the same type, in order to
    differentiate between each instance "id" can be used (e.g., multi-lane PCIe
    PHY). If "id" is not provided, it is set to default value of '1'.
+ - syscon-pllreset: Handle to system control region that contains the
+   CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+   register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
 
 This is usually a subnode of ocp2scp to which it is connected.
 
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
                        "sysclk",
                        "refclk";
 };
+
+sata_phy: phy@4A096000 {
+       compatible = "ti,phy-pipe3-sata";
+       reg = <0x4A096000 0x80>, /* phy_rx */
+             <0x4A096400 0x64>, /* phy_tx */
+             <0x4A096800 0x40>; /* pll_ctrl */
+       reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+       ctrl-module = <&omap_control_sata>;
+       clocks = <&sys_clkin1>, <&sata_ref_clk>;
+       clock-names = "sysclk", "refclk";
+       syscon-pllreset = <&scm_conf 0x3fc>;
+       #phy-cells = <0>;
+};
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6aa2d9af3e3987e8be06fd237dbef..1fec1135791d98c987105872c63b5e96589633d3 100644 (file)
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
diff --git a/Documentation/networking/6lowpan.txt b/Documentation/networking/6lowpan.txt
new file mode 100644 (file)
index 0000000..a7dc7e9
--- /dev/null
@@ -0,0 +1,50 @@
+
+Netdev private dataroom for 6lowpan interfaces:
+
+All 6lowpan-capable net devices, meaning all interfaces with ARPHRD_6LOWPAN,
+must have "struct lowpan_priv" placed at the beginning of netdev_priv.
+
+The priv_size of each interface should be calculated by:
+
+ dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);
+
+Where LL_6LOWPAN_PRIV_DATA is the size of the link-layer 6lowpan private
+data struct. To access that structure you can cast:
+
+ lowpan_priv(dev)->priv;
+
+to your LL_6LOWPAN_PRIV_DATA structure.
+
+Before registering the lowpan netdev interface you must run:
+
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+
+where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN link-layer type from
+enum lowpan_lltypes.
+
+For example, to access the private data you can do:
+
+static inline struct lowpan_priv_foobar *
+lowpan_foobar_priv(struct net_device *dev)
+{
+       return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
+}
+
+switch (dev->type) {
+case ARPHRD_6LOWPAN:
+       lowpan_priv = lowpan_priv(dev);
+       /* do great stuff which is ARPHRD_6LOWPAN related */
+       switch (lowpan_priv->lltype) {
+       case LOWPAN_LLTYPE_FOOBAR:
+               /* do 802.15.4 6LoWPAN handling here */
+               lowpan_foobar_priv(dev)->bar = foo;
+               break;
+       ...
+       }
+       break;
+...
+}
+
+In case of the generic 6lowpan branch ("net/6lowpan") you can remove the check
+on ARPHRD_6LOWPAN, because you can be sure that these functions are called
+by ARPHRD_6LOWPAN interfaces.
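
As an illustration of the API documented above, a minimal sketch of how a
hypothetical "foobar" link layer might allocate its netdev.  Only
lowpan_priv(), LOWPAN_PRIV_SIZE(), lowpan_netdev_setup() and ARPHRD_6LOWPAN
come from the text; the foobar names, LOWPAN_LLTYPE_FOOBAR and the
alloc_netdev() setup callback are placeholders:

	#include <linux/netdevice.h>
	#include <net/6lowpan.h>

	struct lowpan_priv_foobar {	/* hypothetical LL private data */
		int bar;
	};

	static inline struct lowpan_priv_foobar *
	lowpan_foobar_priv(struct net_device *dev)
	{
		/* lowpan_priv() points at the struct lowpan_priv kept at
		 * the start of netdev_priv(), as required above */
		return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
	}

	static void foobar_setup(struct net_device *dev)
	{
		/* placeholder: MTU, header_ops, ... for this link layer */
	}

	static struct net_device *foobar_alloc(void)
	{
		struct net_device *dev;

		/* reserve struct lowpan_priv plus the LL private data */
		dev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_priv_foobar)),
				   "lowpan%d", NET_NAME_UNKNOWN, foobar_setup);
		if (!dev)
			return NULL;

		/* must run before registering the interface */
		lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
		lowpan_foobar_priv(dev)->bar = 0;
		return dev;
	}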
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 56db1efd7189ac6b25fdfc4a18b579f28e01a4b7..46e88ed7f41d202326b3f198eb9bd0c02b62c5f9 100644 (file)
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1181,6 +1181,11 @@ tag - INTEGER
        Allows you to write a number, which can be used as required.
        Default value is 0.
 
+xfrm4_gc_thresh - INTEGER
+       The threshold at which we will start garbage collecting for IPv4
+       destination cache entries.  At twice this value the system will
+       refuse new allocations.
+
 Alexey Kuznetsov.
 kuznet@ms2.inr.ac.ru
 
@@ -1617,6 +1622,11 @@ ratelimit - INTEGER
        otherwise the minimal space between responses in milliseconds.
        Default: 1000
 
+xfrm6_gc_thresh - INTEGER
+       The threshold at which we will start garbage collecting for IPv6
+       destination cache entries.  At twice this value the system will
+       refuse new allocations.
+
 
 IPv6 Update by:
 Pekka Savola <pekkas@netcore.fi>
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 9825f32a86349c1469a114d0aa85318115fac1e0..476df0496686d50b16fa73126cc4fdf52ac004f7 100644 (file)
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -367,4 +367,5 @@ driver's rocker_port_ipv4_resolve() for an example.
 
 The driver can monitor for updates to arp_tbl using the netevent notifier
 NETEVENT_NEIGH_UPDATE.  The device can be programmed with resolved nexthops
-for the routes as arp_tbl updates.
+for the routes as arp_tbl updates.  The driver implements ndo_neigh_destroy
+to know when arp_tbl neighbor entries are purged from the port.
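
For reference, a sketch of such a hook in a switchdev-style driver.  This
assumes the 4.2-era callback signature void (*ndo_neigh_destroy)(struct
neighbour *n); example_port_neigh_update() is hypothetical driver plumbing,
loosely modelled on the rocker nexthop handling mentioned above:

	#include <linux/netdevice.h>
	#include <net/neighbour.h>
	#include <net/arp.h>

	/* hypothetical helper that (un)programs the nexthop in hardware */
	static void example_port_neigh_update(struct net_device *dev,
					      struct neighbour *n, bool dead);

	static void example_port_neigh_destroy(struct neighbour *n)
	{
		/* only IPv4 neighbours from arp_tbl are offloaded here */
		if (n->tbl == &arp_tbl)
			example_port_neigh_update(n->dev, n, true);
	}

	/* wired up in the driver's struct net_device_ops:
	 *	.ndo_neigh_destroy = example_port_neigh_destroy,
	 */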
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
index 6d993510f091c2088877ccfc1edf15474c5c723f..c28f4989c3f0143155b2d651a688e119e6b6c2eb 100644 (file)
--- a/Documentation/networking/vxlan.txt
+++ b/Documentation/networking/vxlan.txt
@@ -1,32 +1,36 @@
 Virtual eXtensible Local Area Networking documentation
 ======================================================
 
-The VXLAN protocol is a tunnelling protocol that is designed to
-solve the problem of limited number of available VLAN's (4096).
-With VXLAN identifier is expanded to 24 bits.
-
-It is a draft RFC standard, that is implemented by Cisco Nexus,
-Vmware and Brocade. The protocol runs over UDP using a single
-destination port (still not standardized by IANA).
-This document describes the Linux kernel tunnel device,
-there is also an implantation of VXLAN for Openvswitch.
-
-Unlike most tunnels, a VXLAN is a 1 to N network, not just point
-to point. A VXLAN device can either dynamically learn the IP address
-of the other end, in a manner similar to a learning bridge, or the
-forwarding entries can be configured statically.
-
-The management of vxlan is done in a similar fashion to it's
-too closest neighbors GRE and VLAN. Configuring VXLAN requires
-the version of iproute2 that matches the kernel release
-where VXLAN was first merged upstream.
+The VXLAN protocol is a tunnelling protocol designed to solve the
+problem of limited VLAN IDs (4096) in IEEE 802.1q.  With VXLAN the
+size of the identifier is expanded to 24 bits (16777216).
+
+VXLAN is described by IETF RFC 7348, and has been implemented by a
+number of vendors.  The protocol runs over UDP using a single
+destination port.  This document describes the Linux kernel tunnel
+device; there is also a separate implementation of VXLAN for
+Openvswitch.
+
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point to
+point. A VXLAN device can learn the IP address of the other endpoint
+either dynamically in a manner similar to a learning bridge, or make
+use of statically-configured forwarding entries.
+
+The management of vxlan is done in a manner similar to its two closest
+neighbors GRE and VLAN. Configuring VXLAN requires the version of
+iproute2 that matches the kernel release where VXLAN was first merged
+upstream.
 
 1. Create vxlan device
-  # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
-
-This creates a new device (vxlan0). The device uses the
-the multicast group 239.1.1.1 over eth1 to handle packets where
-no entry is in the forwarding table.
+ # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789
+
+This creates a new device named vxlan0.  The device uses the multicast
+group 239.1.1.1 over eth1 to handle traffic for which there is no
+entry in the forwarding table.  The destination port number is set to
+the IANA-assigned value of 4789.  The Linux implementation of VXLAN
+pre-dates the IANA's selection of a standard destination port number
+and uses the Linux-selected value by default to maintain backwards
+compatibility.
 
 2. Delete vxlan device
   # ip link delete vxlan0
diff --git a/MAINTAINERS b/MAINTAINERS
index 98ede02a96f2bf5666ab2cf14ef4caceb0fdf9cb..4e6dcb692d3069eb2777d8304f511679906f3188 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -158,6 +158,7 @@ L:  linux-wpan@vger.kernel.org
 S:     Maintained
 F:     net/6lowpan/
 F:     include/net/6lowpan.h
+F:     Documentation/networking/6lowpan.txt
 
 6PACK NETWORK DRIVER FOR AX.25
 M:     Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -3587,6 +3588,15 @@ S:       Maintained
 F:     drivers/gpu/drm/rockchip/
 F:     Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:     Vincent Abriou <vincent.abriou@st.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:     Maintained
+F:     drivers/gpu/drm/sti
+F:     Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -5600,6 +5610,7 @@ F:        kernel/irq/
 IRQCHIP DRIVERS
 M:     Thomas Gleixner <tglx@linutronix.de>
 M:     Jason Cooper <jason@lakedaemon.net>
+M:     Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5608,11 +5619,14 @@ F:      Documentation/devicetree/bindings/interrupt-controller/
 F:     drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
+M:     Jiang Liu <jiang.liu@linux.intel.com>
+M:     Marc Zyngier <marc.zyngier@arm.com>
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     Documentation/IRQ-domain.txt
 F:     include/linux/irqdomain.h
 F:     kernel/irq/irqdomain.c
+F:     kernel/irq/msi.c
 
 ISAPNP
 M:     Jaroslav Kysela <perex@perex.cz>
@@ -8867,6 +8881,12 @@ L:       linux-media@vger.kernel.org
 S:     Supported
 F:     drivers/media/i2c/s5k5baf.c
 
+SAMSUNG S3FWRN5 NFC DRIVER
+M:     Robert Baldyga <r.baldyga@samsung.com>
+L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+S:     Supported
+F:     drivers/nfc/s3fwrn5
+
 SAMSUNG SOC CLOCK DRIVERS
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 M:     Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Makefile b/Makefile
index afabc44a349b7b31a2028660e7e61b134556a675..6e88c371b32f760fb8c13601d23869e91ebd0289 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS  += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
   ifdef CONFIG_MODULE_COMPRESS_GZIP
-    mod_compress_cmd = gzip -n
+    mod_compress_cmd = gzip -n -f
   endif # CONFIG_MODULE_COMPRESS_GZIP
   ifdef CONFIG_MODULE_COMPRESS_XZ
-    mod_compress_cmd = xz
+    mod_compress_cmd = xz -f
   endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91cf4055acab0439e564a96056012befd5fb4c36..bd4670d1b89bcabf043f13015c01b59397b73427 100644 (file)
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
 
 config ARC_PAGE_SIZE_16K
        bool "16KB"
-       depends on ARC_MMU_V3
+       depends on ARC_MMU_V3 || ARC_MMU_V4
 
 config ARC_PAGE_SIZE_4K
        bool "4KB"
-       depends on ARC_MMU_V3
+       depends on ARC_MMU_V3 || ARC_MMU_V4
 
 endchoice
 
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
        default y
        depends on !ARC_CANT_LLSC
 
+config ARC_STAR_9000923308
+       bool "Workaround for llock/scond livelock"
+       default y
+       depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
        default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
          dest operands with 2 possible source operands.
        default y
 
+config ARC_HAS_DIV_REM
+       bool "Insn: div, divu, rem, remu"
+       default y
+
 config ARC_HAS_RTC
        bool "Local 64-bit r/o cycle counter"
        default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 46d87310220dadaf96be4ff08c42b240d2eb4916..8a27a48304a4c0127d97996d73c7d7dc0515d8a3 100644 (file)
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44)                      += -fsection-anchors
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
 
+ifdef CONFIG_ISA_ARCV2
+
 ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2)             += -mno-ll64
+cflags-y                               += -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y                               += -mno-div-rem
+endif
+
 endif
 
 cflags-$(CONFIG_ARC_DW2_UNWIND)                += -fasynchronous-unwind-tables
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c12c2e19469ff4280f7c69e0f36a3..c8f57b8449dcf6a36aa61cd3589b90ebba42d7ea 100644 (file)
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
 #define ECR_C_BIT_DTLB_LD_MISS         8
 #define ECR_C_BIT_DTLB_ST_MISS         9
 
-
 /* Auxiliary registers */
 #define AUX_IDENTITY           4
 #define AUX_INTR_VEC_BASE      0x25
-
+#define AUX_NON_VOL            0x5e
 
 /*
  * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
 
 struct bcr_perip {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int start:8, pad2:8, sz:8, pad:8;
+       unsigned int start:8, pad2:8, sz:8, ver:8;
 #else
-       unsigned int pad:8, sz:8, pad2:8, start:8;
+       unsigned int ver:8, sz:8, pad2:8, start:8;
 #endif
 };
 
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d2eb4fada0095ee427726c23bd2e1..87d18ae53115596f7b64a56a4a07a572d54c3cbd 100644 (file)
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW      "       prefetchw   [%1]        \n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF                                               \
+       unsigned int delay = 1, tmp;                                            \
+
+#define SCOND_FAIL_RETRY_ASM                                                   \
+       "       bz      4f                      \n"                             \
+       "   ; --- scond fail delay ---          \n"                             \
+       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
+       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
+       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
+       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
+       "       b       1b                      \n"     /* start over */        \
+       "4: ; --- success ---                   \n"                             \
+
+#define SCOND_FAIL_RETRY_VARS                                                  \
+         ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \
+
+#else  /* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM                                                   \
+       "       bnz     1b                      \n"                             \
+
+#define SCOND_FAIL_RETRY_VARS
+
 #endif
 
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
-       unsigned int temp;                                              \
+       unsigned int val;                                               \
+       SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
-       "       " #asm_op " %0, %0, %2  \n"                             \
-       "       scond   %0, [%1]        \n"                             \
-       "       bnz     1b              \n"                             \
-       : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
-       : "r"(&v->counter), "ir"(i)                                     \
+       "1:     llock   %[val], [%[ctr]]                \n"             \
+       "       " #asm_op " %[val], %[val], %[i]        \n"             \
+       "       scond   %[val], [%[ctr]]                \n"             \
+       "                                               \n"             \
+       SCOND_FAIL_RETRY_ASM                                            \
+                                                                       \
+       : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
+         SCOND_FAIL_RETRY_VARS                                         \
+       : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
+         [i]   "ir"    (i)                                             \
        : "cc");                                                        \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
-       unsigned int temp;                                              \
+       unsigned int val;                                               \
+       SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
-       "       " #asm_op " %0, %0, %2  \n"                             \
-       "       scond   %0, [%1]        \n"                             \
-       "       bnz     1b              \n"                             \
-       : "=&r"(temp)                                                   \
-       : "r"(&v->counter), "ir"(i)                                     \
+       "1:     llock   %[val], [%[ctr]]                \n"             \
+       "       " #asm_op " %[val], %[val], %[i]        \n"             \
+       "       scond   %[val], [%[ctr]]                \n"             \
+       "                                               \n"             \
+       SCOND_FAIL_RETRY_ASM                                            \
+                                                                       \
+       : [val] "=&r"   (val)                                           \
+         SCOND_FAIL_RETRY_VARS                                         \
+       : [ctr] "r"     (&v->counter),                                  \
+         [i]   "ir"    (i)                                             \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
-       return temp;                                                    \
+       return val;                                                     \
 }
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
 
 /**
  * __atomic_add_unless - add unless the number is a given value
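For readers new to LL/SC, the loop ATOMIC_OP() generates boils down to the following (a conceptual C sketch only, assuming hypothetical llock()/scond() helpers in place of the ARC load-locked/store-conditional instructions; the real implementation is the inline asm above):

	/*
	 * Conceptual equivalent of ATOMIC_OP(add, +=, add) -- not real code.
	 * llock() gains the cache line exclusively and returns the value;
	 * scond() stores only if exclusivity was kept, non-zero on success.
	 */
	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		unsigned int val;

		do {
			val = llock(&v->counter);	/* load-locked */
			val += i;
		} while (!scond(&v->counter, val));	/* lost the line: retry */
	}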
index 91694ec1ce959498fd5b4431962b03bbdf4119b7..69095da1fcfd1e35f16234aaf473896194064d38 100644 (file)
 struct pt_regs {
 
        /* Real registers */
-       long bta;       /* bta_l1, bta_l2, erbta */
+       unsigned long bta;      /* bta_l1, bta_l2, erbta */
 
-       long lp_start, lp_end, lp_count;
+       unsigned long lp_start, lp_end, lp_count;
 
-       long status32;  /* status32_l1, status32_l2, erstatus */
-       long ret;       /* ilink1, ilink2 or eret */
-       long blink;
-       long fp;
-       long r26;       /* gp */
+       unsigned long status32; /* status32_l1, status32_l2, erstatus */
+       unsigned long ret;      /* ilink1, ilink2 or eret */
+       unsigned long blink;
+       unsigned long fp;
+       unsigned long r26;      /* gp */
 
-       long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+       unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 
-       long sp;        /* user/kernel sp depending on where we came from  */
-       long orig_r0;
+       unsigned long sp;       /* User/Kernel depending on where we came from */
+       unsigned long orig_r0;
 
        /*
         * To distinguish between excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
                unsigned long event;
        };
 
-       long user_r25;
+       unsigned long user_r25;
 };
 #else
 
 struct pt_regs {
 
-       long orig_r0;
+       unsigned long orig_r0;
 
        union {
                struct {
@@ -76,26 +76,26 @@ struct pt_regs {
                unsigned long event;
        };
 
-       long bta;       /* bta_l1, bta_l2, erbta */
+       unsigned long bta;      /* bta_l1, bta_l2, erbta */
 
-       long user_r25;
+       unsigned long user_r25;
 
-       long r26;       /* gp */
-       long fp;
-       long sp;        /* user/kernel sp depending on where we came from  */
+       unsigned long r26;      /* gp */
+       unsigned long fp;
+       unsigned long sp;       /* user/kernel sp depending on where we came from  */
 
-       long r12;
+       unsigned long r12;
 
        /*------- Below list auto saved by h/w -----------*/
-       long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+       unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
-       long blink;
-       long lp_end, lp_start, lp_count;
+       unsigned long blink;
+       unsigned long lp_end, lp_start, lp_count;
 
-       long ei, ldi, jli;
+       unsigned long ei, ldi, jli;
 
-       long ret;
-       long status32;
+       unsigned long ret;
+       unsigned long status32;
 };
 
 #endif
@@ -103,10 +103,10 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-       long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+       unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)      (unsigned long)((regs)->ret)
+#define instruction_pointer(regs)      ((regs)->ret)
 #define profile_pc(regs)               instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
 
 static inline long regs_return_value(struct pt_regs *regs)
 {
-       return regs->r0;
+       return (long)regs->r0;
 }
 
 #endif /* !__ASSEMBLY__ */
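A note on the long -> unsigned long conversion above: register images are bit patterns, so keeping them unsigned avoids accidental sign extension on 32-bit kernel addresses; only regs_return_value() casts back to long, since syscall returns follow the negative-errno convention. A hedged illustration (values and the handle_error() helper are hypothetical, not from the patch):

	/*
	 * A failed syscall leaves e.g. -ENOMEM in r0; the raw pattern is
	 * 0xfffffff4 on 32-bit. Error checks only make sense on the signed
	 * view, hence the explicit (long) cast in regs_return_value().
	 */
	unsigned long r0 = (unsigned long)-ENOMEM;	/* 0xfffffff4 */

	if ((long)r0 < 0)		/* true: error correctly detected */
		handle_error((long)r0);	/* hypothetical helper */

	if (r0 < 0)			/* never true: unsigned compare */
		;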
index e1651df6a93d5bc8ab0af3a833c7c6ffd23acacc..db8c59d1eaeb760798c287a15720573ed58b9e4a 100644 (file)
 #define arch_spin_unlock_wait(x) \
        do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-       unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+       unsigned int val;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bnz     1b                      \n"
+       "       mov     %[got_it], 1            \n"
+       "4:                                     \n"
+       "                                       \n"
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       smp_mb();
+
+       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * zero means a writer holds the lock exclusively; deny the reader.
+        * Otherwise grant the lock to the first/subsequent reader.
+        *
+        *      if (rw->counter > 0) {
+        *              rw->counter--;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
+       "       sub     %[val], %[val], 1       \n"     /* reader lock */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
+       "       sub     %[val], %[val], 1       \n"     /* counter-- */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"     /* retry if collided with someone */
+       "       mov     %[got_it], 1            \n"
+       "                                       \n"
+       "4: ; --- done ---                      \n"
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+        * deny writer. Otherwise, if unlocked, grant to writer.
+        * Hence the claim that Linux rwlocks are unfair to writers:
+        * they can be starved for an indefinite time by readers.
+        *
+        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+        *              rw->counter = 0;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"     /* retry if collided with someone */
+       "       mov     %[got_it], 1            \n"
+       "                                       \n"
+       "4: ; --- done ---                      \n"
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter++;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       add     %[val], %[val], 1       \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter))
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       smp_mb();
+
+       rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
+
+#else  /* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into an LLOCK/SCOND livelock when multiple coherency
+ * transactions overlap in the SCU. The exclusive line state keeps rotating
+ * among contending cores, leading to a never-ending cycle. So break the
+ * cycle by deferring the retry of a failed exclusive access (SCOND). The
+ * actual delay needed is a function of the number of contending cores as
+ * well as unrelated coherency traffic from other cores. To keep the code
+ * simple, start off with a small delay of 1, which suffices in most cases,
+ * and double it on each contention. Eventually the delay is large enough to
+ * drain the coherency pipeline, and a subsequent exclusive access succeeds.
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF                                               \
+       unsigned int delay, tmp;                                                \
+
+#define SCOND_FAIL_RETRY_ASM                                                   \
+       "   ; --- scond fail delay ---          \n"                             \
+       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
+       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
+       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
+       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
+       "       b       1b                      \n"     /* start over */        \
+       "                                       \n"                             \
+       "4: ; --- done ---                      \n"                             \
+
+#define SCOND_FAIL_RETRY_VARS                                                  \
+         ,[delay] "=&r" (delay), [tmp] "=&r"   (tmp)                           \
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       unsigned int val;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 0b   \n"     /* spin while LOCKED */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bz      4f                      \n"     /* done */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val)
+         SCOND_FAIL_RETRY_VARS
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       unsigned int val, got_it = 0;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bz.d    4f                      \n"
+       "       mov.z   %[got_it], 1            \n"     /* got it */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+         SCOND_FAIL_RETRY_VARS
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       smp_mb();
+
+       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       /*
+        * zero means a writer holds the lock exclusively; deny the reader.
+        * Otherwise grant the lock to the first/subsequent reader.
+        *
+        *      if (rw->counter > 0) {
+        *              rw->counter--;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 0b\n"     /* <= 0: spin while write locked */
+       "       sub     %[val], %[val], 1       \n"     /* reader lock */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz      4f                      \n"     /* done */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
+       "       sub     %[val], %[val], 1       \n"     /* counter-- */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz.d    4f                      \n"
+       "       mov.z   %[got_it], 1            \n"     /* got it */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       /*
+        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+        * deny writer. Otherwise, if unlocked, grant to writer.
+        * Hence the claim that Linux rwlocks are unfair to writers:
+        * they can be starved for an indefinite time by readers.
+        *
+        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+        *              rw->counter = 0;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 0b \n"     /* while !UNLOCKED spin */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz      4f                      \n"
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz.d    4f                      \n"
+       "       mov.z   %[got_it], 1            \n"     /* got it */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter++;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       add     %[val], %[val], 1       \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter))
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       scond   %[UNLOCKED], [%[rwlock]]\n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "r"     (__ARCH_RW_LOCK_UNLOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif /* CONFIG_ARC_STAR_9000923308 */
+
+#else  /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
        /*
         * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
-       : "+&r" (tmp)
+       : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");
 
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        smp_mb();
 }
 
+/* 1 - lock taken successfully */
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-       unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+       unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
        smp_mb();
 
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
-       : "+r" (tmp)
+       : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");
 
        smp_mb();
 
-       return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+       return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-       unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+       unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
 
        /*
         * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
-       : "+r" (tmp)
+       : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");
 
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
  * The spinlock itself is contained in @counter and access to it is
  * serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
  */
 
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x)  ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 /* 1 - lock taken successfully */
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        arch_spin_unlock(&(rw->lock_mutex));
 }
 
+#endif
+
+#define arch_read_can_lock(x)  ((x)->counter > 0)
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
 #define arch_read_lock_flags(lock, flags)      arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)     arch_write_lock(lock)
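Tying the STAR 9000923308 pieces together: each lock primitive above is the plain LL/SC loop plus the SCOND_FAIL_RETRY_* backoff glue. In C terms the acquire path is roughly the following (a conceptual sketch with the same hypothetical llock()/scond() helpers; the smp_mb() calls are omitted for brevity):

	static inline void arch_spin_lock_sketch(arch_spinlock_t *lock)
	{
		unsigned int delay, tmp, val;

	again:
		delay = 1;				/* label "0:": reset backoff */
		for (;;) {
			val = llock(&lock->slock);	/* label "1:" */
			if (val == __ARCH_SPIN_LOCK_LOCKED__)
				goto again;		/* spin while LOCKED */
			if (scond(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
				return;			/* label "4:": acquired */
			for (tmp = delay; tmp; tmp--)	/* let the SCU drain */
				;
			delay <<= 1;			/* rol: double the delay */
		}
	}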
 
index 662627ced4f23a966c85feffb9f9d38a4f7df10a..4e1ef5f650c6f2fc74ee1fbb09957d8d23e7b7da 100644 (file)
@@ -26,7 +26,9 @@ typedef struct {
  */
 typedef struct {
        volatile unsigned int   counter;
+#ifndef CONFIG_ARC_HAS_LLSC
        arch_spinlock_t         lock_mutex;
+#endif
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED__      0x01000000
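The entire rwlock state lives in @counter: __ARCH_RW_LOCK_UNLOCKED__ (0x01000000) when free, 0 when write-locked, and 0x01000000 - N while N readers hold it, which is what arch_read_can_lock()/arch_write_can_lock() in spinlock.h test. A small sketch of the invariant (hypothetical helper, not part of the patch):

	/*
	 * counter == 0x01000000        unlocked (__ARCH_RW_LOCK_UNLOCKED__)
	 * counter == 0x01000000 - N    N readers (each read_lock does -1)
	 * counter == 0                 writer holds it exclusively
	 */
	static inline unsigned int rwlock_nr_readers_sketch(arch_rwlock_t *rw)
	{
		return __ARCH_RW_LOCK_UNLOCKED__ - rw->counter;	/* N, if no writer */
	}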
index 76a7739aab1c5173f397c0f8a5a79c5169489f41..0b3ef63d4a03b3ef2ff119535ee3c020641e1888 100644 (file)
 */
 struct user_regs_struct {
 
-       long pad;
+       unsigned long pad;
        struct {
-               long bta, lp_start, lp_end, lp_count;
-               long status32, ret, blink, fp, gp;
-               long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
-               long sp;
+               unsigned long bta, lp_start, lp_end, lp_count;
+               unsigned long status32, ret, blink, fp, gp;
+               unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+               unsigned long sp;
        } scratch;
-       long pad2;
+       unsigned long pad2;
        struct {
-               long r25, r24, r23, r22, r21, r20;
-               long r19, r18, r17, r16, r15, r14, r13;
+               unsigned long r25, r24, r23, r22, r21, r20;
+               unsigned long r19, r18, r17, r16, r15, r14, r13;
        } callee;
-       long efa;       /* break pt addr, for break points in delay slots */
-       long stop_pc;   /* give dbg stop_pc after ensuring brkpt trap */
+       unsigned long efa;      /* break pt addr, for break points in delay slots */
+       unsigned long stop_pc;  /* give dbg stop_pc after ensuring brkpt trap */
 };
 #endif /* !__ASSEMBLY__ */
 
index 18cc01591c96e64186a8b13c1aef5b8011091b12..cabde9dc0696479cc3a4d3074fd526cf89c85182 100644 (file)
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
        struct bcr_perip uncached_space;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+       unsigned long perip_space;
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
        cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
        READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
-       BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+       if (uncached_space.ver < 3)
+               perip_space = uncached_space.start << 24;
+       else
+               perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+       BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
 
        READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
                pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
        else if (!cpu->extn.fpu_dp && fpu_enabled)
                panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+       if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+           !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+               panic("llock/scond livelock workaround missing\n");
 }
 
 /*
index 3364d2bbc515471bba6478b8b34a417251ffde56..4294761a2b3e7ad3b36f5eca5bc26490e31ed61f 100644 (file)
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
        return 0;
 }
 
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
-                                 struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
 {
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-                /*
-                 * At X Hz, 1 sec = 1000ms -> X cycles;
-                 *                    10ms -> X / 100 cycles
-                 */
-               arc_timer_event_setup(arc_get_core_freq() / HZ);
-               break;
-       case CLOCK_EVT_MODE_ONESHOT:
-               break;
-       default:
-               break;
-       }
-
-       return;
+       /*
+        * At X Hz, 1 sec = 1000ms -> X cycles;
+        *                    10ms -> X / 100 cycles
+        */
+       arc_timer_event_setup(arc_get_core_freq() / HZ);
+       return 0;
 }
 
 static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
-       .name           = "ARC Timer0",
-       .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-       .mode           = CLOCK_EVT_MODE_UNUSED,
-       .rating         = 300,
-       .irq            = TIMER0_IRQ,   /* hardwired, no need for resources */
-       .set_next_event = arc_clkevent_set_next_event,
-       .set_mode       = arc_clkevent_set_mode,
+       .name                   = "ARC Timer0",
+       .features               = CLOCK_EVT_FEAT_ONESHOT |
+                                 CLOCK_EVT_FEAT_PERIODIC,
+       .rating                 = 300,
+       .irq                    = TIMER0_IRQ,   /* hardwired, no need for resources */
+       .set_next_event         = arc_clkevent_set_next_event,
+       .set_state_periodic     = arc_clkevent_set_periodic,
 };
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
         * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
         */
        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-       int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+       int irq_reenable = clockevent_state_periodic(evt);
 
        /*
         * Any write to CTRL reg ACks the interrupt, we rewrite the
index 1b2b3acfed52df6f1fb0aad58fa834d3d689482d..0cab0b8a57c5665e6686e9bef843fbfa51f141fd 100644 (file)
@@ -206,7 +206,7 @@ unalignedOffby3:
        ld.ab   r6, [r1, 4]
        prefetch [r1, 28]       ;Prefetch the next read location
        ld.ab   r8, [r1,4]
-       prefetch [r3, 32]       ;Prefetch the next write location
+       prefetchw [r3, 32]      ;Prefetch the next write location
 
        SHIFT_1 (r7, r6, 8)
        or      r7, r7, r5
index 92d573c734b5b3d52dec2d8fcf6eb67cc96d16f6..365b183648154c70de1726955b9242e88d3cc60c 100644 (file)
 
 #undef PREALLOC_NOT_AVAIL
 
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B)  prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B)  prealloc [(A),(B)]
-#endif
-
 ENTRY(memset)
        prefetchw [r0]          ; Prefetch the write location
        mov.f   0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
 
 ;;; Convert len to Dwords, unfold x8
        lsr.f   lp_count, lp_count, 6
+
        lpnz    @.Lset64bytes
        ;; LOOP START
-       PREWRITE(r3, 64)        ;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+       prefetchw [r3, 64]      ;Prefetch the next write location
+#else
+       prealloc  [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
+#else
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+#endif
 .Lset64bytes:
 
        lsr.f   lp_count, r2, 5 ;Last remaining  max 124 bytes
        lpnz    .Lset32bytes
        ;; LOOP START
        prefetchw   [r3, 32]    ;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
+#else
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+#endif
 .Lset32bytes:
 
        and.f   lp_count, r2, 0x1F ;Last remaining 31 bytes
index 99f7da513a48462031a58815d48428509bad0848..e7769c3ab5f2b7793aff703ca5e926983b45ec29 100644 (file)
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
 
 static void __init axs103_early_init(void)
 {
+       /*
+        * AXS103 SMP/QUAD configurations share a device tree which defaults
+        * to 90 MHz. However, recent failures of the Quad config revealed P&R
+        * timing violations, so clamp it down to a safe 50 MHz. Instead of
+        * duplicating defconfig/DT for SMP/QUAD, add a small hack.
+        *
+        * This is admittedly still a hack. Fix it properly by getting the
+        * number of cores as the return value of the platform's early SMP
+        * callback.
+        */
+#ifdef CONFIG_ARC_MCIP
+       unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+       if (num_cores > 2)
+               arc_set_core_freq(50 * 1000000);
+#endif
+
        switch (arc_get_core_freq()/1000000) {
        case 33:
                axs103_set_freq(1, 1, 1);
index 21fcc440fc1a9d886d408701a705ad4daabd2325..8b59c869c41268e42847ded07091010276d929f4 100644 (file)
                };
 
                mac: ethernet@4a100000 {
-                       compatible = "ti,cpsw";
+                       compatible = "ti,am335x-cpsw","ti,cpsw";
                        ti,hwmods = "cpgmac0";
                        clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
                        clock-names = "fck", "cpts";
index 8f1e25bcecbd76273f62671e8b9afa8f193ea261..6dbbc02d18b4d635dc10b21cc2f73b04b8490e49 100644 (file)
                                ranges = <0 0x2000 0x2000>;
 
                                scm_conf: scm_conf@0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon", "simple-bus";
                                        reg = <0x0 0x1400>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
                                ctrl-module = <&omap_control_sata>;
                                clocks = <&sys_clkin1>, <&sata_ref_clk>;
                                clock-names = "sysclk", "refclk";
+                               syscon-pllreset = <&scm_conf 0x3fc>;
                                #phy-cells = <0>;
                        };
 
                };
 
                mac: ethernet@4a100000 {
-                       compatible = "ti,cpsw";
+                       compatible = "ti,dra7-cpsw","ti,cpsw";
                        ti,hwmods = "gmac";
                        clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
                        clock-names = "fck", "cpts";
index d7201333e3bcd181d0a0281b3d214a6b5e92265a..2db99433e17fdad0299b672ae87e49b6444a457e 100644 (file)
 
                mipi_phy: video-phy@10020710 {
                        compatible = "samsung,s5pv210-mipi-video-phy";
-                       reg = <0x10020710 8>;
                        #phy-cells = <1>;
+                       syscon = <&pmu_system_controller>;
                };
 
                pd_cam: cam-power-domain@10023C00 {
index e0abfc3324d11eaed33838be9c04b7f1a167f5fb..e050d85cdacddf24268870988badefca45d75a88 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck1_reg>;
+};
+
 &fimd {
        pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
        pinctrl-names = "default";
index 98f3ce65cb9a387a55ee588069bf42b51103317c..ba34886f8b65b6227f82ef93c530603b64910449 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&varm_breg>;
+};
+
 &dsi_0 {
        vddcore-supply = <&vusb_reg>;
        vddio-supply = <&vmipi_reg>;
index d4f2b11319dd10d4d7b79fa295d55e63baccff9c..775892b2cc6a8d1564f1bf463abaa19df1a859a4 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&vdd_arm_reg>;
+};
+
 &pinctrl_1 {
        hdmi_hpd: hdmi-hpd {
                samsung,pins = "gpx3-7";
index 10d3c173396e4cb67a2443f2d3e641c264bec168..3e5ba665d20009de0a974c9ceccb283e431b3b54 100644 (file)
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0x900>;
+                       clocks = <&clock CLK_ARM_CLK>;
+                       clock-names = "cpu";
+                       clock-latency = <160000>;
+
+                       operating-points = <
+                               1200000 1250000
+                               1000000 1150000
+                               800000  1075000
+                               500000  975000
+                               400000  975000
+                               200000  950000
+                       >;
                        cooling-min-level = <4>;
                        cooling-max-level = <2>;
                        #cooling-cells = <2>; /* min followed by max */
index b6478e97d6a7eb8cbb6478470d4a5e10794afaff..e6540b5cfa4cac9c06d354be4fdea73c999cd85b 100644 (file)
                        can1: can@53fe4000 {
                                compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
                                reg = <0x53fe4000 0x1000>;
-                               clocks = <&clks 33>;
-                               clock-names = "ipg";
+                               clocks = <&clks 33>, <&clks 33>;
+                               clock-names = "ipg", "per";
                                interrupts = <43>;
                                status = "disabled";
                        };
                        can2: can@53fe8000 {
                                compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
                                reg = <0x53fe8000 0x1000>;
-                               clocks = <&clks 34>;
-                               clock-names = "ipg";
+                               clocks = <&clks 34>, <&clks 34>;
+                               clock-names = "ipg", "per";
                                interrupts = <44>;
                                status = "disabled";
                        };
index e6d13592080d7c701056c2f6a73326aa11e715b5..b57033e8c633187a5f52c367a788f46196967fdc 100644 (file)
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
                                 <&clks IMX6QDL_CLK_LVDS1_GATE>,
                                 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
index 4773d6af66a0ad8bfa2296a53737e0d82f3698ce..d56d68fe7ffc65788dee9867630433f263f08293 100644 (file)
@@ -13,9 +13,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
index 1b6494fbdb91b9301c607652efbd9a9fb34a9f36..675fb8e492c6aa0478a6d5df01b30fbe1e281b7d 100644 (file)
                                        <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
                        };
                };
+
+               mdio: mdio@24200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x24200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2e-netcp.dtsi"
        };
 };
-
-&mdio {
-       reg = <0x24200f00 0x100>;
-};
index d5adee3c006758076c4c6a8f693022893b29f4da..af9b7190533aa9c47bf9cd17a63a1d08901ede7d 100644 (file)
@@ -22,9 +22,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
index ae6472407b2277012096d733bb80951592555d03..d0810a5f296857394397c7f1c60bffa0011bb6e1 100644 (file)
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x25c>;
                };
+
+               mdio: mdio@02090300 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x02090300 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2hk-netcp.dtsi"
        };
 };
index eb1e3e29f073856d76a1e47130bdd3639726d648..ef8464bb11ffd9833e24f2dbe4d61d348065dd1c 100644 (file)
@@ -22,9 +22,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
index 0e007483615e4f097bb747a2d882b2e2d3a030aa..49fd414f680c93ab50cf0dae72d2e9261181da21 100644 (file)
@@ -29,7 +29,6 @@
        };
 
        soc {
-
                /include/ "k2l-clocks.dtsi"
 
                uart2: serial@02348400 {
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x24c>;
                };
+
+               mdio: mdio@26200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x26200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2l-netcp.dtsi"
        };
 };
        /* Pin muxed. Enabled and configured by Bootloader */
        status = "disabled";
 };
-
-&mdio {
-       reg = <0x26200f00 0x100>;
-};
index e7a6f6deabb6c0d89d4ca1e2c2ae63639249d010..72816d65f7ec3fcf5d7c47ce792ae57db369754b 100644 (file)
                                  1 0 0x21000A00 0x00000100>;
                };
 
-               mdio: mdio@02090300 {
-                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       reg             = <0x02090300 0x100>;
-                       status = "disabled";
-                       clocks = <&clkpa>;
-                       clock-names = "fck";
-                       bus_freq        = <2500000>;
-               };
-
                kirq0: keystone_irq@26202a0 {
                        compatible = "ti,keystone-irq";
                        interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
index 11a7963be0035a002fa77c2bec6809b34444e584..2390f387c27163bb76e918bb73e26966bee7fb48 100644 (file)
@@ -51,7 +51,8 @@
                                };
 
                                scm_conf: scm_conf@270 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x270 0x240>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index 7d31c6ff246f47b14afd5eeb332d01a955faef35..abc4473e6f8a17e51d5e66416be089ab24d7b472 100644 (file)
                                };
 
                                omap4_padconf_global: omap4_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0x170>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index c8fd648a7108515def0e9492936fd3760f156579..b1a1263e600168291091a963f9f56aefc87fd59e 100644 (file)
                                };
 
                                omap5_padconf_global: omap5_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0xec>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index a75f3289e653ab2973e2d7dd1cb12c8a12724451..b8f81fb418ce60039ad4e8e04f2892ca34d26bc8 100644 (file)
 #include "skeleton.dtsi"
 
 / {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               enable-method = "ste,dbx500-smp";
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+               };
+               CPU0: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x300>;
+               };
+               CPU1: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x301>;
+               };
+       };
+
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
                interrupt-parent = <&intc>;
                ranges;
 
-               cpus {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       cpu-map {
-                               cluster0 {
-                                       core0 {
-                                               cpu = <&CPU0>;
-                                       };
-                                       core1 {
-                                               cpu = <&CPU1>;
-                                       };
-                               };
-                       };
-                       CPU0: cpu@0 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <0>;
-                       };
-                       CPU1: cpu@1 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <1>;
-                       };
-               };
-
                ptm@801ae000 {
                        compatible = "arm,coresight-etm3x", "arm,primecell";
                        reg = <0x801ae000 0x1000>;
index 3d0b8755caeee62f77ac214d343ab40cebba2ce0..3d25dba143a5d7f4a254460343161ad5e9c4e6ff 100644 (file)
@@ -17,6 +17,7 @@
        };
 
        aliases {
+               serial1 = &uart1;
                stmpe-i2c0 = &stmpe0;
                stmpe-i2c1 = &stmpe1;
        };
index 85d3b95dfdba55b543aa0f66733df936d5074abc..3c140d05f7966c79ae0a0e9ed323baaf2a80adb6 100644 (file)
                bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
        };
 
+       aliases {
+               serial1 = &uart1;
+       };
+
        src@101e0000 {
                /* These crystal drivers are not used on this board */
                disable-sxtalo;
index 9a5f2ba139b7376018d8e48a6263e2ad711fa865..ef794a33b4dcc2ddf2076a0e27ff948e01d67d44 100644 (file)
                        clock-names = "uartclk", "apb_pclk";
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart0_default_mux>;
+                       status = "disabled";
                };
 
                uart1: uart@101fb000 {
index 92828a1dec80c1c33d051d9b76063727598495d5..b48dd4f37f8067e781ee3e135ed7aff27940371f 100644 (file)
@@ -61,6 +61,7 @@ work_pending:
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
index bd755d97e459d77ff05cc8a1264f336c58c1b598..29e2991465cb27b579f729deec65e2293a0a04b5 100644 (file)
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
+ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
+ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
index efe17dd9b9218b7ef16299700a0f2a6d74ca61c1..54a5aeab988d3526657b8e3089942ca8cfe4fe5e 100644 (file)
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        struct timespec64 *wtm = &tk->wall_to_monotonic;
 
        if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
        vdso_write_begin(vdso_data);
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->tk_is_cntvct                 = tk_is_cntvct(tk);
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = (u32)(tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift);
        vdso_data->wtm_clock_sec                = wtm->tv_sec;
        vdso_data->wtm_clock_nsec               = wtm->tv_nsec;
 
index 6001f1c9d136f45fabd7d61e97638855d0beb46a..4a87e86dec45d1546153ca0ebb7310bbd5f82d93 100644 (file)
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
                pd->base = of_iomap(np, 0);
                if (!pd->base) {
                        pr_warn("%s: failed to map memory\n", __func__);
-                       kfree(pd->pd.name);
+                       kfree_const(pd->pd.name);
                        kfree(pd);
-                       of_node_put(np);
                        continue;
                }
 
index d78c12e7cb5e1ace5f79a9d28f45321e809dea24..486cc4ded1906670c053f110bb09aeee34ea7fa3 100644 (file)
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
  * registers.  This address is needed early so the OCP registers that
  * are part of the device's address space can be ioremapped properly.
  *
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
  * -ENXIO on absent or invalid register target address space.
  */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 
        _save_mpu_port_index(oh);
 
+       /* if we don't need sysc access we don't need to ioremap */
+       if (!oh->class->sysc)
+               return 0;
+
+       /* we can't continue without MPU PORT if we need sysc access */
        if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
                return -ENXIO;
 
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
                         oh->name);
 
                /* Extract the IO space from device tree blob */
-               if (!np)
+               if (!np) {
+                       pr_err("omap_hwmod: %s: no dt node\n", oh->name);
                        return -ENXIO;
+               }
 
                va_start = of_iomap(np, index + oh->mpu_rt_idx);
        } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
                                oh->name, np->name);
        }
 
-       if (oh->class->sysc) {
-               r = _init_mpu_rt_base(oh, NULL, index, np);
-               if (r < 0) {
-                       WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-                            oh->name);
-                       return 0;
-               }
+       r = _init_mpu_rt_base(oh, NULL, index, np);
+       if (r < 0) {
+               WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+                    oh->name);
+               return 0;
        }
 
        r = _init_clocks(oh, NULL);
index 2606c6608bd8b5c72793cbcc11acc1e2ccee4301..562247bced496bc085f75cb65f5061f8ec2be6c1 100644 (file)
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
        .syss_offs      = 0x0014,
        .sysc_flags     = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
                           SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          SIDLE_SMART_WKUP),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
        .sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
        .class          = &dra7xx_gpmc_hwmod_class,
        .clkdm_name     = "l3main1_clkdm",
        /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
-       .flags          = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
+       .flags          = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
        .main_clk       = "l3_iclk_div",
        .prcm = {
                .omap4 = {
index 9d259d94e429c4cc493542ad4cf238a513b13743..1160434eece0509c3797733b49e8fcb1262e42e7 100644 (file)
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
index 0689c3fb56e3d84fe3ed7790f4a6b25835c86555..58093edeea2e5b7513321ff5ddfcb305ac873638 100644 (file)
                        device_type = "dma";
                        reg = <0x0 0x1f270000 0x0 0x10000>,
                              <0x0 0x1f200000 0x0 0x10000>,
-                             <0x0 0x1b008000 0x0 0x2000>,
+                             <0x0 0x1b000000 0x0 0x400000>,
                              <0x0 0x1054a000 0x0 0x100>;
                        interrupts = <0x0 0x82 0x4>,
                                     <0x0 0xb8 0x4>,
index 1670f15ef69e34972986081deb9b1f87b0bb2bb3..948f0ad2de231b5e3f5efa62e204162cadf26503 100644 (file)
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
                break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE))
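
The si_signo test in the first hunk matters because si_code values are per-signal namespaces that reuse the same small integers (SIGBUS's BUS_MCEERR_AR and SIGTRAP's TRAP_HWBKPT are both 4, for instance). A hedged sketch of the predicate the hunk now encodes:

#include <linux/types.h>
#include <linux/signal.h>

static bool has_addr_lsb(const siginfo_t *info)
{
        /* si_addr_lsb is defined only for these two SIGBUS codes. */
        return info->si_signo == SIGBUS &&
               (info->si_code == BUS_MCEERR_AR ||
                info->si_code == BUS_MCEERR_AO);
}
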
index ec37ab3f524f303419d2cc3a82b79c119e61de1d..97bc68f4c689f28eac7188f5e0b792b5293c37da 100644 (file)
@@ -199,16 +199,15 @@ up_fail:
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
        ++vdso_data->tb_seq_count;
        smp_wmb();
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->use_syscall                  = use_syscall;
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift;
        vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
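
The coarse fields now come straight from the timekeeper snapshot passed to update_vsyscall(), so they are published under the same tb_seq_count update as the rest of vdso_data. A minimal sketch of the conversion, using the struct timekeeper fields seen above (the shift turns the fixed-point xtime_nsec into whole nanoseconds):

#include <linux/timekeeper_internal.h>

static inline void tk_coarse(const struct timekeeper *tk,
                             u64 *sec, u64 *nsec)
{
        *sec  = tk->xtime_sec;
        *nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
}
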
index cee5f93e5712f3120d36847dc02d74709bc21929..199a8357838cb24bde2ce5ed5dec7db62feb8d2e 100644 (file)
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
        select BCM7120_L2_IRQ
        select BRCMSTB_L2_IRQ
        select IRQ_MIPS_CPU
-       select RAW_IRQ_ACCESSORS
        select DMA_NONCOHERENT
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
index 01a644f174dd08e34843ca501035b077d777bea2..1ba21204ebe021ee164a9f8f4828dd3f3c836f84 100644 (file)
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
 {
        return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 56f5d080ef9d6cb698ba70cf7027783167000284..b7fa9ae28c3659dbf457aecd7cd17255cd34f5da 100644 (file)
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
 
        if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644 (file)
index 11d3b57..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush    bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
index 9d810675814291d14ce004f9d4447678af2227c3..ae85694752644339af22557a5ae424cde8271400 100644 (file)
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
+#ifdef CONFIG_SMP
+               /*
+                * For SMP, multiple CPUs can race, so we need to do
+                * this atomically.
+                */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+               unsigned long page_global = _PAGE_GLOBAL;
+               unsigned long tmp;
+
+               __asm__ __volatile__ (
+                       "       .set    push\n"
+                       "       .set    noreorder\n"
+                       "1:     " LL_INSN "     %[tmp], %[buddy]\n"
+                       "       bnez    %[tmp], 2f\n"
+                       "        or     %[tmp], %[tmp], %[global]\n"
+                       "       " SC_INSN "     %[tmp], %[buddy]\n"
+                       "       beqz    %[tmp], 1b\n"
+                       "        nop\n"
+                       "2:\n"
+                       "       .set pop"
+                       : [buddy] "+m" (buddy->pte),
+                         [tmp] "=&r" (tmp)
+                       : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
                if (pte_none(*buddy))
                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
        }
 #endif
 }
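
A hedged alternative rendering of the same update using cmpxchg() instead of a hand-rolled ll/sc loop. This is not the patch's implementation, and it assumes an all-zero PTE means "none", exactly as the bnez in the asm above does:

static inline void buddy_set_global(pte_t *buddy)
{
        unsigned long *p = (unsigned long *)buddy; /* assumes one-long PTEs */
        unsigned long old;

        do {
                old = *p;
                if (old)        /* buddy already populated: nothing to do */
                        return;
        } while (cmpxchg(p, old, old | _PAGE_GLOBAL) != old);
}
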
index 16f1ea9ab191234ee8dc5599803b91dcc2ccf745..03722d4326a1aad05935b58805ec8d881703201e 100644 (file)
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif
 
-extern asmlinkage void smp_call_function_interrupt(void);
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
        extern struct plat_smp_ops *mp_ops;     /* private */
index 28d6d9364bd1f2c431df08c72f58262e5297ec5c..a71da576883c8f4b1a3d60279ebfaefb95798031 100644 (file)
                .set    noreorder
                bltz    k0, 8f
                 move   k1, sp
+#ifdef CONFIG_EVA
+               /*
+                * Flush interAptiv's Return Prediction Stack (RPS) by writing
+                * EntryHi. Toggling Config7.RPS is slower and less portable.
+                *
+                * The RPS isn't automatically flushed when exceptions are
+                * taken, which can result in kernel mode speculative accesses
+                * to user addresses if the RPS mispredicts. That's harmless
+                * when user and kernel share the same address space, but with
+                * EVA the same user segments may be unmapped in kernel mode,
+                * or may even map sensitive MMIO regions or invalid memory.
+                *
+                * This can happen when the kernel sets the return address to
+                * ret_from_* and jr's to the exception handler, which looks
+                * more like a tail call than a function call. If nested calls
+                * don't evict the last user address in the RPS, it will
+                * mispredict the return and fetch from a user-controlled
+                * address into the icache.
+                *
+                * More recent EVA-capable cores with MAAR to restrict
+                * speculative accesses aren't affected.
+                */
+               MFC0    k0, CP0_ENTRYHI
+               MTC0    k0, CP0_ENTRYHI
+#endif
                .set    reorder
                /* Called from user mode, new stack. */
                get_saved_sp
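
The C-level equivalent of the MFC0/MTC0 pair is a write of EntryHi's current value back to itself; it is the write that flushes the interAptiv RPS. A hedged sketch using the standard mipsregs.h accessors:

#include <asm/mipsregs.h>

static inline void flush_rps(void)
{
        /* Rewriting the current value suffices; only the write matters. */
        write_c0_entryhi(read_c0_entryhi());
}
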
index 3e4491aa6d6b2425865e1d1a3a909cf05aaa4e28..789d7bf4fef3203b3038a9ddaf20c5f70f1bc948 100644 (file)
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
                                      unsigned long __user *user_mask_ptr)
 {
        unsigned int real_len;
-       cpumask_t mask;
+       cpumask_t allowed, mask;
        int retval;
        struct task_struct *p;
 
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
        if (retval)
                goto out_unlock;
 
-       cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+       cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+       cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
        read_unlock(&tasklist_lock);
index b130033838ba0c391ef4f9f0c7aa19e5a50b6eb7..5fcec3032f38f6aebdf668af3318997e4f53921f 100644 (file)
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
        return mips_machine_name;
 }
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
        return add_memory_region(base, size, BOOT_MEM_RAM);
index 74bab9ddd0e1984c9d4e4e95c038bb1d269b60dd..c6bbf21650515d1e71eead45b41a7729f8794476 100644 (file)
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
 
 process_entry:
        PTR_L           s2, (s0)
-       PTR_ADD         s0, s0, SZREG
+       PTR_ADDIU       s0, s0, SZREG
 
        /*
         * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
        /* copy page word by word */
        REG_L           s5, (s2)
        REG_S           s5, (s4)
-       PTR_ADD         s4, s4, SZREG
-       PTR_ADD         s2, s2, SZREG
-       LONG_SUB        s6, s6, 1
+       PTR_ADDIU       s4, s4, SZREG
+       PTR_ADDIU       s2, s2, SZREG
+       LONG_ADDIU      s6, s6, -1
        beq             s6, zero, process_entry
        b               copy_word
        b               process_entry
index ad4d44635c7601162ca0dd8f1b626df28eeeafb2..a6f6b762c47a4c5a2d395e13a1d564964595abe1 100644 (file)
@@ -80,7 +80,7 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_64_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 446cc654da56c5f5fcaad749242dd98d593776e1..4b2010654c463158b7dee80194de736195c04595 100644 (file)
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_N32_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 19a7705f2a015ef4b38e1cd0a16eb22c2a2d3ca3..5d7f2634996fd4920f0a4c94e00cd42fae5934b1 100644 (file)
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
index 336708ae5c5b4c74b75416058feabb4bef5e30b1..78cf8c2f1de0e8790923d25ab6e42a85e53a6fe9 100644 (file)
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
        if (action == 0)
                scheduler_ipi();
        else
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
        if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index d0744cc77ea7f7a02d94c96faf190787f7e88f64..a31896c33716d424bb30397c17b29af07c6728bb 100644 (file)
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
        cpu_startup_entry(CPUHP_ONLINE);
 }
 
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
-       irq_enter();
-       generic_smp_call_function_interrupt();
-       irq_exit();
-}
-
 static void stop_this_cpu(void *dummy)
 {
        /*
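
The convention this series converges on, sketched below (hedged; SMP_CALL_FUNCTION stands in for the platform's IPI action bit): handlers registered through request_irq() already run between irq_enter() and irq_exit(), so they may call generic_smp_call_function_interrupt() directly, while low-level demultiplexers invoked straight from the exception path must add the bracketing themselves, as the loongson64, ip27 and sibyte hunks below do:

#include <linux/interrupt.h>
#include <linux/smp.h>

/* request_irq() handler: irq context is already set up for us. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

/* Raw demultiplexer entered from the low-level IPI vector. */
static void ipi_demux(unsigned int action)
{
        if (action & SMP_CALL_FUNCTION) {
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
        }
}
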
index e207a43b5f8f0bcbf0544e5289cfc08126cbc7f5..8ea28e6ab37dead56439dc37871b6b18e8ec02d5 100644 (file)
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
        struct pt_regs regs;
+       mm_segment_t old_fs = get_fs();
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                        prepare_frametrace(&regs);
                }
        }
+       /*
+        * show_stack() deals exclusively with kernel mode, so be sure to access
+        * the stack in the kernel (not user) address space.
+        */
+       set_fs(KERNEL_DS);
        show_stacktrace(task, &regs);
+       set_fs(old_fs);
 }
 
 static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;
        enum ctx_state prev_state;
+       mm_segment_t old_fs = get_fs();
 
        prev_state = exception_enter();
        show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
                dump_tlb_all();
        }
 
+       if (!user_mode(regs))
+               set_fs(KERNEL_DS);
+
        show_code((unsigned int __user *) regs->cp0_epc);
 
+       set_fs(old_fs);
+
        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
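
The idiom used in both hunks, restated as a hedged standalone helper: widen the access limit to KERNEL_DS only around code known to dereference kernel pointers, and always restore the caller's limit:

#include <linux/uaccess.h>

static void with_kernel_ds(void (*fn)(void *), void *arg)
{
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);      /* the pointers read here are kernel ones */
        fn(arg);
        set_fs(old_fs);         /* never leak the widened limit */
}
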
index af84bef0c90de4bc65e14669dca132ebb7147846..eb3efd137fd17cdb6e1defa163744a480ad16185 100644 (file)
@@ -438,7 +438,7 @@ do {                                                        \
                : "memory");                                \
 } while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
 do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
index 6ab10573490de8a2d5a449e45c3585547c51f7c4..2c218c3bbca57be3d029cdb3320b712092bccd46 100644 (file)
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
 }
 
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
 {
        return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 509877c6e9d908d7bac6110982c7208ab69204af..1a4738a8f2d3906ccffb58bdf8d9b35ee4b04ef3 100644 (file)
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
-       if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+       if (action & SMP_CALL_FUNCTION) {
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
+       }
 
        if (action & SMP_ASK_C0COUNT) {
                BUG_ON(cpu != 0);
index 77d96db8253c422ac9e48d93e02c6b6f39b41c1b..aab218c36e0d3e2f7669c47343e583e527103169 100644 (file)
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
                protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
-               protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-               protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 
                protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-               protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-               protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE  | _PAGE_NO_READ);
+               protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 
        } else {
index 36c0f26fac6b0780318958a59fc2665a444a10ea..852a41c6da4507080d611dce0b1fc206caf30556 100644 (file)
@@ -133,7 +133,8 @@ good_area:
 #endif
                                goto bad_area;
                        }
-                       if (!(vma->vm_flags & VM_READ)) {
+                       if (!(vma->vm_flags & VM_READ) &&
+                           exception_epc(regs) != address) {
 #if 0
                                pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
                                          raw_smp_processor_id(),
index d1392f8f5811f65ec72445026ac12c4fe15fe6b1..fa8f591f371361ba6fe3654617e6f44690a48a25 100644 (file)
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index 5625b190edc061afbf2a8885e976b48014270325..b7bf721eabf5411bfb2f55b6b7d2cdc9ba887f96 100644 (file)
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
 
        return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
 
 static void __init init_rtc(void)
 {
-       /* stop the clock whilst setting it up */
-       CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+       unsigned char freq, ctrl;
 
-       /* 32KHz time base */
-       CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+       /* Set 32KHz time base if not already set */
+       freq = CMOS_READ(RTC_FREQ_SELECT);
+       if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+               CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
 
-       /* start the clock */
-       CMOS_WRITE(RTC_24H, RTC_CONTROL);
+       /* Ensure SET bit is clear so RTC can run */
+       ctrl = CMOS_READ(RTC_CONTROL);
+       if (ctrl & RTC_SET)
+               CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }
 
 void __init plat_time_init(void)
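
The shape of the fix is a conditional read-modify-write: read the CMOS register, change only the bits that are wrong, and skip the write when the hardware is already configured, so a battery-backed clock is never stopped needlessly. A hedged generic helper (illustrative, not from the patch):

#include <linux/mc146818rtc.h>
#include <linux/types.h>

static void cmos_update_bits(u8 reg, u8 mask, u8 val)
{
        u8 cur = CMOS_READ(reg);

        if ((cur & mask) != val)
                CMOS_WRITE((cur & ~mask) | val, reg);
}
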
index e1d69895fb1de44f5d8503027f86ebb50f40d5a6..a120b7a5a8fe4e9af03ccb40fc9e7e123f88d633 100644 (file)
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index dc3e327fbbac105e71c6b89a039e0f79f91e6f3d..f5fff228b347b6da07d68212fb11f4ae140548c8 100644 (file)
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
 {
        clear_c0_eimr(irq);
        ack_c0_eirr(irq);
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
        set_c0_eimr(irq);
 }
 
index 42181c7105df70992892ead68933bcd5375ab74b..f8d3e081b2ebc77e6752dc10a61a69e9a8172b3d 100644 (file)
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
 }
 
index 7c73fcb92a108799866c8d603019129d33c57ee6..8a377346f0cabbf5ce91199ca039ec013b16dda1 100644 (file)
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
 {
        return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 int get_c0_fdc_int(void)
 {
index 10170580a2def4501bb4f149c707d050a900246f..ffa0f7101a9773ec8e24813f37e3c270d912b5e2 100644 (file)
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index 53707aacc0f86cb13134546de07ccaf71d76321d..8c624a8b9ea29f5611abceb531de09c865e46b7c 100644 (file)
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
 {
        return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 3fbaef97a1b8d31791e8999bd222e3e1b01c3701..16ec4e12daa3fb7bed3355a5cd56cdb3c87946fc 100644 (file)
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
                scheduler_ipi();
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-               smp_call_function_interrupt();
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-               smp_call_function_interrupt();
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
        } else
 #endif
        {
index af7d44edd9a8f118b79944ecf21310634f308b56..4c71aea2566372c3f3af8627c79e91fb9aede4ea 100644 (file)
@@ -29,8 +29,6 @@
 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>
 
-extern void smp_call_function_interrupt(void);
-
 /*
  * These are routines for dealing with the bcm1480 smp capabilities
  * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
-       if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+       if (action & SMP_CALL_FUNCTION) {
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
+       }
 }
index c0c4b3f88a086f2c331cce0b311d9c547d3b5a6a..1cf66f5ff23d1a5afca26ffd9bc638566d8f68cb 100644 (file)
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
-       if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+       if (action & SMP_CALL_FUNCTION) {
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
+       }
 }
index d3a831ac0f927e17304d55c406c60029ad55a4e0..da50e0c9c57e69af8779f0df979231104704ab6e 100644 (file)
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
index 5cf5e6ea213baaeee1b6e017636eae95aba6b0c6..7cf0df859d0536bcfd4752d4b581da810873196b 100644 (file)
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
        }
 
        /* Unmask the event */
-       if (eeh_enabled())
+       if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
                enable_irq(eeh_event_irq);
 
        return ret;
index 5738d315248b202b4a26aff084b07e819a80855c..85cbc96eff6cbd60e3e6bb9bf126091181eb5ef1 100644 (file)
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
 
 static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
                unsigned levels, unsigned long limit,
-               unsigned long *current_offset)
+               unsigned long *current_offset, unsigned long *total_allocated)
 {
        struct page *tce_mem = NULL;
        __be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
        }
        addr = page_address(tce_mem);
        memset(addr, 0, allocated);
+       *total_allocated += allocated;
 
        --levels;
        if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 
        for (i = 0; i < entries; ++i) {
                tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
-                               levels, limit, current_offset);
+                               levels, limit, current_offset, total_allocated);
                if (!tmp)
                        break;
 
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
                struct iommu_table *tbl)
 {
        void *addr;
-       unsigned long offset = 0, level_shift;
+       unsigned long offset = 0, level_shift, total_allocated = 0;
        const unsigned window_shift = ilog2(window_size);
        unsigned entries_shift = window_shift - page_shift;
        unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 
        /* Allocate TCE table */
        addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-                       levels, tce_table_size, &offset);
+                       levels, tce_table_size, &offset, &total_allocated);
 
        /* addr==NULL means that the first level allocation failed */
        if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
                        page_shift);
        tbl->it_level_size = 1ULL << (level_shift - 3);
        tbl->it_indirect_levels = levels - 1;
-       tbl->it_allocated_size = offset;
+       tbl->it_allocated_size = total_allocated;
 
        pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
                        window_size, tce_table_size, bus_offset);
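
A hedged, generic sketch of the accounting fix: thread a *total out-parameter through the recursion so every level's pages are counted, not only the offset consumed at the top level. All names below are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_level(int nid, unsigned int levels, unsigned long *total)
{
        struct page *pg = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 0);

        if (!pg)
                return NULL;
        *total += PAGE_SIZE;    /* count this level, like total_allocated */

        if (levels > 1) {
                void **slot = page_address(pg);

                *slot = alloc_level(nid, levels - 1, total);
                if (!*slot) {
                        __free_page(pg);
                        return NULL;
                }
        }
        return page_address(pg);
}
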
index 2078f92d15ac90adcfec617b46df750e073b76d0..f32f843a3631359e49b88169ab8a1eed2b76b946 100644 (file)
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->requests)
-               return 0;
 retry:
        kvm_s390_vcpu_request_handled(vcpu);
+       if (!vcpu->requests)
+               return 0;
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
index 9f4bbc09bf07b634092aede3058d89226ef60f97..eeda051442c3d0de0bec154b9aa3f15fe85e6de8 100644 (file)
@@ -1032,7 +1032,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                                      MAX_TAIL_CALL_CNT, 0, 0x2);
 
                /*
-                * prog = array->prog[index];
+                * prog = array->ptrs[index];
                 * if (prog == NULL)
                 *         goto out;
                 */
@@ -1041,7 +1041,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
                /* lg %r1,prog(%b2,%r1) */
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
-                             REG_1, offsetof(struct bpf_array, prog));
+                             REG_1, offsetof(struct bpf_array, ptrs));
                /* clgij %r1,0,0x8,label0 */
                EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
 
index 1f0aa2024e94be341efc079f58145a2e70909052..6424249d5f785e698c4283677c1b975668393478 100644 (file)
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf                                   \
-       rd              %fprs, %o5;                     \
-       andcc           %o5, FPRS_FEF, %g0;             \
-       be,pt           %icc, 297f;                     \
-        sethi          %hi(298f), %g7;                 \
-       sethi           %hi(VISenterhalf), %g1;         \
-       jmpl            %g1 + %lo(VISenterhalf), %g0;   \
-        or             %g7, %lo(298f), %g7;            \
-       clr             %o5;                            \
-297:   wr              %o5, FPRS_FEF, %fprs;           \
-298:
+       VISEntry
+
+#define VISExitHalf                                    \
+       VISExit
 
 #define VISEntryHalfFast(fail_label)                   \
        rd              %fprs, %o5;                     \
@@ -47,7 +41,7 @@
        ba,a,pt         %xcc, fail_label;               \
 297:   wr              %o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf                                    \
+#define VISExitHalfFast                                        \
        wr              %o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
index 140527a20e7df03cc0a0dd9e6a3438f44b432177..83aeeb1dffdb3b4c29293d5924cd5259e2269ce5 100644 (file)
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
        add             %o0, 0x40, %o0
        bne,pt          %icc, 1b
         LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+       VISExitHalfFast
+#else
        VISExitHalf
-
+#endif
        brz,pn          %o2, .Lexit
         cmp            %o2, 19
        ble,pn          %icc, .Lsmall_unaligned
index b320ae9e2e2e8b27c7184f58ddc640b4adf6fdda..a063d84336d6384a03d7ddd8a5143f0909801ed8 100644 (file)
@@ -44,9 +44,8 @@ vis1: ldub            [%g6 + TI_FPSAVED], %g3
 
         stx            %g3, [%g6 + TI_GSR]
 2:     add             %g6, %g1, %g3
-       cmp             %o5, FPRS_DU
-       be,pn           %icc, 6f
-        sll            %g1, 3, %g1
+       mov             FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+       sll             %g1, 3, %g1
        stb             %o5, [%g3 + TI_FPSAVED]
        rd              %gsr, %g2
        add             %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1:        ldub            [%g6 + TI_FPSAVED], %g3
        .align          32
 80:    jmpl            %g7 + %g0, %g0
         nop
-
-6:     ldub            [%g3 + TI_FPSAVED], %o5
-       or              %o5, FPRS_DU, %o5
-       add             %g6, TI_FPREGS+0x80, %g2
-       stb             %o5, [%g3 + TI_FPSAVED]
-
-       sll             %g1, 5, %g1
-       add             %g6, TI_FPREGS+0xc0, %g3
-       wr              %g0, FPRS_FEF, %fprs
-       membar          #Sync
-       stda            %f32, [%g2 + %g1] ASI_BLK_P
-       stda            %f48, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 80f
-        nop
-
-       .align          32
-80:    jmpl            %g7 + %g0, %g0
-        nop
-
-       .align          32
-VISenterhalf:
-       ldub            [%g6 + TI_FPDEPTH], %g1
-       brnz,a,pn       %g1, 1f
-        cmp            %g1, 1
-       stb             %g0, [%g6 + TI_FPSAVED]
-       stx             %fsr, [%g6 + TI_XFSR]
-       clr             %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %g0, FPRS_FEF, %fprs
-
-1:     bne,pn          %icc, 2f
-        srl            %g1, 1, %g1
-       ba,pt           %xcc, vis1
-        sub            %g7, 8, %g7
-2:     addcc           %g6, %g1, %g3
-       sll             %g1, 3, %g1
-       andn            %o5, FPRS_DU, %g2
-       stb             %g2, [%g3 + TI_FPSAVED]
-
-       rd              %gsr, %g2
-       add             %g6, %g1, %g3
-       stx             %g2, [%g3 + TI_GSR]
-       add             %g6, %g1, %g2
-       stx             %fsr, [%g2 + TI_XFSR]
-       sll             %g1, 5, %g1
-3:     andcc           %o5, FPRS_DL, %g0
-       be,pn           %icc, 4f
-        add            %g6, TI_FPREGS, %g2
-
-       add             %g6, TI_FPREGS+0x40, %g3
-       membar          #Sync
-       stda            %f0, [%g2 + %g1] ASI_BLK_P
-       stda            %f16, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 4f
-        nop
-
-       .align          32
-4:     and             %o5, FPRS_DU, %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %o5, FPRS_FEF, %fprs
index 1d649a95660c8cad57fbe90feadb7c43b9e8263f..8069ce12f20b13d514160cec8db0c0d88b64b27e 100644 (file)
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
                unsigned long *);
index e8c2c04143cda81db9b51018a449b611263ab68d..c667e104a0c251d73f02ce2b812ed09878ca79a0 100644 (file)
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
        if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
                return -EFAULT;
 
-       memset(to, 0, sizeof(*to));
-
        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);
index 2c82bd150d43546d03d01de07ecc8e43f5a459d8..7d69afd8b6fa47dab5478ce4c8d831ae6780881a 100644 (file)
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
                unsigned int e820_type = 0;
                unsigned long m = efi->efi_memmap;
 
+#ifdef CONFIG_X86_64
+               m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
                d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
                switch (d->type) {
                case EFI_RESERVED_TYPE:
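
The bug class here: EFI hands back 64-bit values split into 32-bit halves, and using only the low half silently truncates addresses above 4 GiB. A hedged sketch of the reassembly:

#include <linux/types.h>

static inline u64 efi_ptr(u32 lo, u32 hi)
{
#ifdef CONFIG_X86_64
        return (u64)hi << 32 | lo;
#else
        return lo;      /* the high half is unused on 32-bit kernels */
#endif
}
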
index 5a1844765a7aba6dab47b878daf6eb723c044c03..a7e257d9cb90b9f34ecb03180fec8c54f2afd82f 100644 (file)
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
         */
        andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl    RIP(%rsp), %ecx         /* User %eip */
+       movq    RAX(%rsp), %rax
        RESTORE_RSI_RDI
        xorl    %edx, %edx              /* Do not leak kernel information */
        xorq    %r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
 1:     setbe   %al                     /* 1 if error, 0 if not */
        movzbl  %al, %edi               /* zero-extend that into %edi */
        call    __audit_syscall_exit
-       movq    RAX(%rsp), %rax         /* reload syscall return value */
        movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
        RESTORE_RSI_RDI_RDX
        movl    RIP(%rsp), %ecx
        movl    EFLAGS(%rsp), %r11d
+       movq    RAX(%rsp), %rax
        xorq    %r10, %r10
        xorq    %r9, %r9
        xorq    %r8, %r8
index a0bf89fd26470102f6f38031808437f01afe8e1f..4e10d73cf01844edc201ed23f51d28a72a537521 100644 (file)
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
        set_ldt(NULL, 0);
 }
 
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-       set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-       preempt_disable();
-       load_LDT_nolock(pc);
-       preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
        return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
index 09b9620a73b4841893fc2bd01bfd614d43e7e214..364d27481a52a34f5031933ecc577af0ebf3c129 100644 (file)
@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-       void *ldt;
-       int size;
+       struct ldt_struct *ldt;
 
 #ifdef CONFIG_X86_64
        /* True if mm supports a task running in 32 bit compatibility mode. */
index 804a3a6030ca046500e7a092cfa0a854be77796f..984abfe47edc85db7b1a1809d5e0b2d22a7c0449 100644 (file)
@@ -33,6 +33,50 @@ static inline void load_mm_cr4(struct mm_struct *mm)
 static inline void load_mm_cr4(struct mm_struct *mm) {}
 #endif
 
+/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+       /*
+        * Xen requires page-aligned LDTs with special permissions.  This is
+        * needed to prevent us from installing evil descriptors such as
+        * call gates.  On native, we could merge the ldt_struct and LDT
+        * allocations, but it's not worth trying to optimize.
+        */
+       struct desc_struct *entries;
+       int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+       struct ldt_struct *ldt;
+
+       /* lockless_dereference synchronizes with smp_store_release */
+       ldt = lockless_dereference(mm->context.ldt);
+
+       /*
+        * Any change to mm->context.ldt is followed by an IPI to all
+        * CPUs with the mm active.  The LDT will not be freed until
+        * after the IPI is handled by all such CPUs.  This means that,
+        * if the ldt_struct changes before we return, the values we see
+        * will be safe, and the new values will be loaded before we run
+        * any user code.
+        *
+        * NB: don't try to convert this to use RCU without extreme care.
+        * We would still need IRQs off, because we don't want to change
+        * the local LDT after an IPI loaded a newer value than the one
+        * that we can see.
+        */
+
+       if (unlikely(ldt))
+               set_ldt(ldt->entries, ldt->size);
+       else
+               clear_LDT();
+
+       DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
 /*
  * Used for LDT copy/destruction.
  */
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
-                * never free an LDT while the mm still exists.  That
-                * means that next->context.ldt != prev->context.ldt,
-                * because mms never share an LDT.
+                * never set context.ldt to NULL while the mm still
+                * exists.  That means that next->context.ldt !=
+                * prev->context.ldt, because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context);
+                       load_mm_ldt(next);
        }
 #ifdef CONFIG_SMP
          else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
-                       load_LDT_nolock(&next->context);
+                       load_mm_ldt(next);
                }
        }
 #endif
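
A hedged distillation of the publish/consume pairing the comment above describes: the writer initializes the ldt_struct completely, then publishes it with smp_store_release(); the reader picks it up with lockless_dereference(), so a half-initialized LDT can never be observed:

static void publish_ldt(struct mm_struct *mm, struct ldt_struct *new_ldt)
{
        /* Every store to *new_ldt must complete before this release. */
        smp_store_release(&mm->context.ldt, new_ldt);
}

static struct ldt_struct *consume_ldt(struct mm_struct *mm)
{
        /* Pairs with the release above; IRQs must stay off, per the NB. */
        return lockless_dereference(mm->context.ldt);
}
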
index 6fe6b182c9981dd891a9a5bc9a55b3e6591a6f9f..9dfce4e0417d92adc623d32ff93f67109316b451 100644 (file)
@@ -57,9 +57,9 @@ struct sigcontext {
        unsigned long ip;
        unsigned long flags;
        unsigned short cs;
-       unsigned short __pad2;  /* Was called gs, but was always zero. */
-       unsigned short __pad1;  /* Was called fs, but was always zero. */
-       unsigned short ss;
+       unsigned short gs;
+       unsigned short fs;
+       unsigned short __pad0;
        unsigned long err;
        unsigned long trapno;
        unsigned long oldmask;
index 0e8a973de9ee8aec0c555a5e9e8b23348e2cc10b..40836a9a7250c99a16dc5969057177f66aa57186 100644 (file)
@@ -177,24 +177,9 @@ struct sigcontext {
        __u64 rip;
        __u64 eflags;           /* RFLAGS */
        __u16 cs;
-
-       /*
-        * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
-        * Linux saved and restored fs and gs in these slots.  This
-        * was counterproductive, as fsbase and gsbase were never
-        * saved, so arch_prctl was presumably unreliable.
-        *
-        * If these slots are ever needed for any other purpose, there
-        * is some risk that very old 64-bit binaries could get
-        * confused.  I doubt that many such binaries still work,
-        * though, since the same patch in 2.5.64 also removed the
-        * 64-bit set_thread_area syscall, so it appears that there is
-        * no TLS API that works in both pre- and post-2.5.64 kernels.
-        */
-       __u16 __pad2;           /* Was gs. */
-       __u16 __pad1;           /* Was fs. */
-
-       __u16 ss;
+       __u16 gs;
+       __u16 fs;
+       __u16 __pad0;
        __u64 err;
        __u64 trapno;
        __u64 oldmask;
index 845dc0df2002472275a39e421502cdb0768c54a1..206052e5551702258a8c65f308fefaed2c988a07 100644 (file)
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
         */
        if (irq < nr_legacy_irqs() && data->count == 1) {
                if (info->ioapic_trigger != data->trigger)
-                       mp_register_handler(irq, data->trigger);
+                       mp_register_handler(irq, info->ioapic_trigger);
                data->entry.trigger = data->trigger = info->ioapic_trigger;
                data->entry.polarity = data->polarity = info->ioapic_polarity;
        }
index 922c5e0cea4c961b1aa6e7a266ce588de7f7a300..cb9e5df42dd2464f7249eea78cc28753eb858ec0 100644 (file)
@@ -1410,7 +1410,7 @@ void cpu_init(void)
        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
-       load_LDT(&init_mm.context);
+       load_mm_ldt(&init_mm);
 
        clear_all_debug_regs();
        dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
-       load_LDT(&init_mm.context);
+       load_mm_ldt(&init_mm);
 
        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
 
index 3658de47900f9a921a0f8373db962e061d42db3a..9469dfa556074db4dff41212a4873b31d5907772 100644 (file)
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
        int idx = segment >> 3;
 
        if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+               struct ldt_struct *ldt;
+
                if (idx > LDT_ENTRIES)
                        return 0;
 
-               if (idx > current->active_mm->context.size)
+               /* IRQs are off, so this synchronizes with smp_store_release */
+               ldt = lockless_dereference(current->active_mm->context.ldt);
+               if (!ldt || idx > ldt->size)
                        return 0;
 
-               desc = current->active_mm->context.ldt;
+               desc = &ldt->entries[idx];
        } else {
                if (idx > GDT_ENTRIES)
                        return 0;
 
-               desc = raw_cpu_ptr(gdt_page.gdt);
+               desc = raw_cpu_ptr(gdt_page.gdt) + idx;
        }
 
-       return get_desc_base(desc + idx);
+       return get_desc_base(desc);
 }
 
 #ifdef CONFIG_COMPAT
index b9826a981fb20fa45a7c1255e277e9ad1cd5d150..6326ae24e4d5b4f3d228111c10f5c85df0e40d3f 100644 (file)
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
-                       return NOTIFY_BAD;
+                       goto err;
        }
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
                if (!cpuc->constraint_list)
-                       return NOTIFY_BAD;
+                       goto err_shared_regs;
 
                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-               if (!cpuc->excl_cntrs) {
-                       kfree(cpuc->constraint_list);
-                       kfree(cpuc->shared_regs);
-                       return NOTIFY_BAD;
-               }
+               if (!cpuc->excl_cntrs)
+                       goto err_constraint_list;
+
                cpuc->excl_thread_id = 0;
        }
 
        return NOTIFY_OK;
+
+err_constraint_list:
+       kfree(cpuc->constraint_list);
+       cpuc->constraint_list = NULL;
+
+err_shared_regs:
+       kfree(cpuc->shared_regs);
+       cpuc->shared_regs = NULL;
+
+err:
+       return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
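
The error path now follows the standard kernel unwind pattern, sketched generically below (struct ctx and its members are hypothetical): each failure label frees exactly what the earlier steps allocated, and freed pointers are NULLed so a later teardown cannot double-free:

#include <linux/slab.h>

struct ctx {
        void *a;
        void *b;
};

static int prepare(struct ctx *c)
{
        c->a = kzalloc(64, GFP_KERNEL);
        if (!c->a)
                goto err;

        c->b = kzalloc(64, GFP_KERNEL);
        if (!c->b)
                goto err_a;

        return 0;

err_a:
        kfree(c->a);
        c->a = NULL;
err:
        return -ENOMEM;
}
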
index 63eb68b73589bcbbc21f9c526193adca0de2e52d..377e8f8ed39186ad4ef57b33264592ed8459a037 100644 (file)
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
        cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
        struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
        unsigned int cpu  = (unsigned long)hcpu;
 
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               intel_cqm_cpu_prepare(cpu);
-               break;
        case CPU_DOWN_PREPARE:
                intel_cqm_cpu_exit(cpu);
                break;
        case CPU_STARTING:
+               intel_cqm_cpu_starting(cpu);
                cqm_pick_event_reader(cpu);
                break;
        }
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
                goto out;
 
        for_each_online_cpu(i) {
-               intel_cqm_cpu_prepare(i);
+               intel_cqm_cpu_starting(i);
                cqm_pick_event_reader(i);
        }
 
index c37886d759ccac2736c36b357cc0f399786f2fb3..2bcc0525f1c10e80b3db33df075b3189c4a68239 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
 static void flush_ldt(void *current_mm)
 {
-       if (current->active_mm == current_mm)
-               load_LDT(&current->active_mm->context);
+       mm_context_t *pc;
+
+       if (current->active_mm != current_mm)
+               return;
+
+       pc = &current->active_mm->context;
+       set_ldt(pc->ldt->entries, pc->ldt->size);
 }
-#endif
 
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
 {
-       void *oldldt, *newldt;
-       int oldsize;
-
-       if (mincount <= pc->size)
-               return 0;
-       oldsize = pc->size;
-       mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
-                       (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
-       if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-               newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+       struct ldt_struct *new_ldt;
+       int alloc_size;
+
+       if (size > LDT_ENTRIES)
+               return NULL;
+
+       new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+       if (!new_ldt)
+               return NULL;
+
+       BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+       alloc_size = size * LDT_ENTRY_SIZE;
+
+       /*
+        * Xen is very picky: it requires a page-aligned LDT that has no
+        * trailing nonzero bytes in any page that contains LDT descriptors.
+        * Keep it simple: zero the whole allocation and never allocate less
+        * than PAGE_SIZE.
+        */
+       if (alloc_size > PAGE_SIZE)
+               new_ldt->entries = vzalloc(alloc_size);
        else
-               newldt = (void *)__get_free_page(GFP_KERNEL);
-
-       if (!newldt)
-               return -ENOMEM;
+               new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
 
-       if (oldsize)
-               memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
-       oldldt = pc->ldt;
-       memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
-              (mincount - oldsize) * LDT_ENTRY_SIZE);
+       if (!new_ldt->entries) {
+               kfree(new_ldt);
+               return NULL;
+       }
 
-       paravirt_alloc_ldt(newldt, mincount);
+       new_ldt->size = size;
+       return new_ldt;
+}
 
-#ifdef CONFIG_X86_64
-       /* CHECKME: Do we really need this ? */
-       wmb();
-#endif
-       pc->ldt = newldt;
-       wmb();
-       pc->size = mincount;
-       wmb();
-
-       if (reload) {
-#ifdef CONFIG_SMP
-               preempt_disable();
-               load_LDT(pc);
-               if (!cpumask_equal(mm_cpumask(current->mm),
-                                  cpumask_of(smp_processor_id())))
-                       smp_call_function(flush_ldt, current->mm, 1);
-               preempt_enable();
-#else
-               load_LDT(pc);
-#endif
-       }
-       if (oldsize) {
-               paravirt_free_ldt(oldldt, oldsize);
-               if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
-                       vfree(oldldt);
-               else
-                       put_page(virt_to_page(oldldt));
-       }
-       return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+       paravirt_alloc_ldt(ldt->entries, ldt->size);
 }
 
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+                       struct ldt_struct *ldt)
 {
-       int err = alloc_ldt(new, old->size, 0);
-       int i;
+       /* Synchronizes with lockless_dereference in load_mm_ldt. */
+       smp_store_release(&current_mm->context.ldt, ldt);
+
+       /* Activate the LDT for all CPUs using current_mm. */
+       on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
 
-       if (err < 0)
-               return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+       if (likely(!ldt))
+               return;
 
-       for (i = 0; i < old->size; i++)
-               write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-       return 0;
+       paravirt_free_ldt(ldt->entries, ldt->size);
+       if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+               vfree(ldt->entries);
+       else
+               kfree(ldt->entries);
+       kfree(ldt);
 }
 
 /*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+       struct ldt_struct *new_ldt;
        struct mm_struct *old_mm;
        int retval = 0;
 
        mutex_init(&mm->context.lock);
-       mm->context.size = 0;
        old_mm = current->mm;
-       if (old_mm && old_mm->context.size > 0) {
-               mutex_lock(&old_mm->context.lock);
-               retval = copy_ldt(&mm->context, &old_mm->context);
-               mutex_unlock(&old_mm->context.lock);
+       if (!old_mm) {
+               mm->context.ldt = NULL;
+               return 0;
        }
+
+       mutex_lock(&old_mm->context.lock);
+       if (!old_mm->context.ldt) {
+               mm->context.ldt = NULL;
+               goto out_unlock;
+       }
+
+       new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+       if (!new_ldt) {
+               retval = -ENOMEM;
+               goto out_unlock;
+       }
+
+       memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+              new_ldt->size * LDT_ENTRY_SIZE);
+       finalize_ldt_struct(new_ldt);
+
+       mm->context.ldt = new_ldt;
+
+out_unlock:
+       mutex_unlock(&old_mm->context.lock);
        return retval;
 }
 
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  */
 void destroy_context(struct mm_struct *mm)
 {
-       if (mm->context.size) {
-#ifdef CONFIG_X86_32
-               /* CHECKME: Can this ever happen ? */
-               if (mm == current->active_mm)
-                       clear_LDT();
-#endif
-               paravirt_free_ldt(mm->context.ldt, mm->context.size);
-               if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
-                       vfree(mm->context.ldt);
-               else
-                       put_page(virt_to_page(mm->context.ldt));
-               mm->context.size = 0;
-       }
+       free_ldt_struct(mm->context.ldt);
+       mm->context.ldt = NULL;
 }
 
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-       int err;
+       int retval;
        unsigned long size;
        struct mm_struct *mm = current->mm;
 
-       if (!mm->context.size)
-               return 0;
+       mutex_lock(&mm->context.lock);
+
+       if (!mm->context.ldt) {
+               retval = 0;
+               goto out_unlock;
+       }
+
        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 
-       mutex_lock(&mm->context.lock);
-       size = mm->context.size * LDT_ENTRY_SIZE;
+       size = mm->context.ldt->size * LDT_ENTRY_SIZE;
        if (size > bytecount)
                size = bytecount;
 
-       err = 0;
-       if (copy_to_user(ptr, mm->context.ldt, size))
-               err = -EFAULT;
-       mutex_unlock(&mm->context.lock);
-       if (err < 0)
-               goto error_return;
+       if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+               retval = -EFAULT;
+               goto out_unlock;
+       }
+
        if (size != bytecount) {
-               /* zero-fill the rest */
-               if (clear_user(ptr + size, bytecount - size) != 0) {
-                       err = -EFAULT;
-                       goto error_return;
+               /* Zero-fill the rest and pretend we read bytecount bytes. */
+               if (clear_user(ptr + size, bytecount - size)) {
+                       retval = -EFAULT;
+                       goto out_unlock;
                }
        }
-       return bytecount;
-error_return:
-       return err;
+       retval = bytecount;
+
+out_unlock:
+       mutex_unlock(&mm->context.lock);
+       return retval;
 }
 
 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        struct desc_struct ldt;
        int error;
        struct user_desc ldt_info;
+       int oldsize, newsize;
+       struct ldt_struct *new_ldt, *old_ldt;
 
        error = -EINVAL;
        if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                        goto out;
        }
 
-       mutex_lock(&mm->context.lock);
-       if (ldt_info.entry_number >= mm->context.size) {
-               error = alloc_ldt(&current->mm->context,
-                                 ldt_info.entry_number + 1, 1);
-               if (error < 0)
-                       goto out_unlock;
-       }
-
-       /* Allow LDTs to be cleared by the user. */
-       if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-               if (oldmode || LDT_empty(&ldt_info)) {
-                       memset(&ldt, 0, sizeof(ldt));
-                       goto install;
+       if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+           LDT_empty(&ldt_info)) {
+               /* The user wants to clear the entry. */
+               memset(&ldt, 0, sizeof(ldt));
+       } else {
+               if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+                       error = -EINVAL;
+                       goto out;
                }
+
+               fill_ldt(&ldt, &ldt_info);
+               if (oldmode)
+                       ldt.avl = 0;
        }
 
-       if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
-               error = -EINVAL;
+       mutex_lock(&mm->context.lock);
+
+       old_ldt = mm->context.ldt;
+       oldsize = old_ldt ? old_ldt->size : 0;
+       newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+       error = -ENOMEM;
+       new_ldt = alloc_ldt_struct(newsize);
+       if (!new_ldt)
                goto out_unlock;
-       }
 
-       fill_ldt(&ldt, &ldt_info);
-       if (oldmode)
-               ldt.avl = 0;
+       if (old_ldt)
+               memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+       new_ldt->entries[ldt_info.entry_number] = ldt;
+       finalize_ldt_struct(new_ldt);
 
-       /* Install the new entry ...  */
-install:
-       write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+       install_ldt(mm, new_ldt);
+       free_ldt_struct(old_ldt);
        error = 0;
 
 out_unlock:
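
The rewritten write_ldt() never mutates an installed LDT in place: it
allocates a new ldt_struct, copies the old entries, patches one slot, and
swaps the pointer under context.lock. A minimal user-space sketch (not from
the patch; assumes Linux/x86 and glibc's syscall()) that exercises both the
write_ldt() and read_ldt() paths via modify_ldt(2):

	/* Hedged demo: func 0x11 writes an entry, func 0 reads the table. */
	#include <asm/ldt.h>	/* struct user_desc, LDT_ENTRY_SIZE */
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct user_desc d;
		unsigned char buf[16 * LDT_ENTRY_SIZE];
		long n;

		memset(&d, 0, sizeof(d));
		d.entry_number = 0;
		d.limit = 0xfff;	/* small 32-bit data segment */
		d.seg_32bit = 1;
		d.useable = 1;

		if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0) {
			perror("modify_ldt(write)");
			return 1;
		}

		/* read_ldt() zero-fills past the allocated entries and
		 * reports the full bytecount on success. */
		n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
		printf("read_ldt returned %ld bytes\n", n);
		return 0;
	}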
index 71d7849a07f7ca93b126bf081464012c7a91c9a6..f6b916387590158706b8ef4ec68bfef64702ff0f 100644 (file)
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
 void release_thread(struct task_struct *dead_task)
 {
        if (dead_task->mm) {
-               if (dead_task->mm->context.size) {
+               if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
-                               dead_task->mm->context.size);
+                               dead_task->mm->context.ldt->size);
                        BUG();
                }
        }
index 206996c1669db344aba7ff072f734552723e7938..71820c42b6ce6bc1020bbc44277967d7e23f011e 100644 (file)
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                COPY(r15);
 #endif /* CONFIG_X86_64 */
 
+#ifdef CONFIG_X86_32
                COPY_SEG_CPL3(cs);
                COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+               /* The kernel saves and restores only the CS segment register on
+                * signals, which is the bare minimum needed to allow mixed
+                * 32/64-bit code.  The app's signal handler can save/restore
+                * other segments if needed. */
+               COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
                get_user_ex(tmpflags, &sc->flags);
                regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 #else /* !CONFIG_X86_32 */
                put_user_ex(regs->flags, &sc->flags);
                put_user_ex(regs->cs, &sc->cs);
-               put_user_ex(0, &sc->__pad2);
-               put_user_ex(0, &sc->__pad1);
-               put_user_ex(regs->ss, &sc->ss);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
                put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
        regs->sp = (unsigned long)frame;
 
-       /*
-        * Set up the CS and SS registers to run signal handlers in
-        * 64-bit mode, even if the handler happens to be interrupting
-        * 32-bit or 16-bit code.
-        *
-        * SS is subtle.  In 64-bit mode, we don't need any particular
-        * SS descriptor, but we do need SS to be valid.  It's possible
-        * that the old SS is entirely bogus -- this can happen if the
-        * signal we're trying to deliver is #GP or #SS caused by a bad
-        * SS value.
-        */
+       /*
+        * Set up the CS register to run signal handlers in 64-bit mode,
+        * even if the handler happens to be interrupting 32-bit code.
+        */
        regs->cs = __USER_CS;
-       regs->ss = __USER_DS;
 
        return 0;
 }
index 9b4d51d0c0d013274f7ba46c2e58319f0d1d9145..0ccb53a9fcd9361b83c7acd26e1f64601816a3d1 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 #include <linux/ptrace.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>
 
 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 {
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
                struct desc_struct *desc;
                unsigned long base;
 
-               seg &= ~7UL;
+               seg >>= 3;
 
                mutex_lock(&child->mm->context.lock);
-               if (unlikely((seg >> 3) >= child->mm->context.size))
+               if (unlikely(!child->mm->context.ldt ||
+                            seg >= child->mm->context.ldt->size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
-                       desc = child->mm->context.ldt + seg;
+                       desc = &child->mm->context.ldt->entries[seg];
                        base = get_desc_base(desc);
 
                        /* 16-bit code segment? */
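
For context on the seg >>= 3 change above: an x86 selector keeps the
requestor privilege level in bits 0-1 and the table indicator in bit 2, so
the descriptor index is simply the selector shifted right by three. A
hypothetical helper making that explicit (illustrative only, not in the
patch):

	static inline unsigned int selector_to_ldt_index(unsigned short sel)
	{
		/* bits 0-1: RPL, bit 2: table indicator (1 = LDT) */
		return sel >> 3;
	}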
index dc0a84a6f3094ac997701de74c868763e6843229..9e8bf13572e6dc3f95d33d24f079d63e256a3a7a 100644 (file)
@@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
        if (iter.mtrr_disabled)
                return mtrr_disabled_type();
 
+       /* not contained in any MTRRs. */
+       if (type == -1)
+               return mtrr_default_type(mtrr_state);
+
        /*
         * We just check one page, partially covered by MTRRs is
         * impossible.
         */
        WARN_ON(iter.partial_map);
 
-       /* not contained in any MTRRs. */
-       if (type == -1)
-               return mtrr_default_type(mtrr_state);
-
        return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
index 5ef2560075bfb80e6fdabcdf51f71258091e4339..8f0f6eca69da1dc6db95c16782871580bf57091d 100644 (file)
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (guest_cpuid_has_tsc_adjust(vcpu)) {
                        if (!msr_info->host_initiated) {
                                s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-                               kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+                               adjust_tsc_offset_guest(vcpu, adj);
                        }
                        vcpu->arch.ia32_tsc_adjust_msr = data;
                }
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
        struct kvm_segment cs, ds;
+       struct desc_ptr dt;
        char buf[512];
        u32 cr0;
 
@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
        kvm_x86_ops->set_cr4(vcpu, 0);
 
+       /* Undocumented: IDT limit is set to zero on entry to SMM.  */
+       dt.address = dt.size = 0;
+       kvm_x86_ops->set_idt(vcpu, &dt);
+
        __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
        cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
index f37e84ab49f38e335bde57880a6cbe8640fb2c4b..3d8f2e421466a8af255eba9602748fee8753a377 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/fpu/internal.h>
 
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
                        math_abort(FPU_info, SIGILL);
                }
 
-               code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+               code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
                if (SEG_D_SIZE(code_descriptor)) {
                        /* The above test may be wrong, the book is not clear */
                        /* Segmented 32 bit protected mode */
index 9ccecb61a4fa129a82028b27edc18b91a2f99042..5e044d506b7aae8b17b2142966b11477cfe8e372 100644 (file)
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)      (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+       static struct desc_struct zero_desc;
+       struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       seg >>= 3;
+       mutex_lock(&current->mm->context.lock);
+       if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+               ret = current->mm->context.ldt->entries[seg];
+       mutex_unlock(&current->mm->context.lock);
+#endif
+       return ret;
+}
+
 #define SEG_D_SIZE(x)          ((x).b & (3 << 21))
 #define SEG_G_BIT(x)           ((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)     (((x).b & (1 << 23)) ? 4096 : 1)
index 6ef5e99380f92134ba86a6a693b5ac6d3434e6d4..8300db71c2a62681006e137350961742190ec9dc 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/stddef.h>
 
 #include <asm/uaccess.h>
-#include <asm/desc.h>
 
 #include "fpu_system.h"
 #include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
                addr->selector = PM_REG_(segment);
        }
 
-       descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+       descriptor = FPU_get_ldt_descriptor(addr->selector);
        base_address = SEG_BASE_ADDR(descriptor);
        address = base_address + offset;
        limit = base_address
index ec5214f39aa802ed923d10315a1672547493262d..70efcd0940f9f34b8649872b5b1ac44853a8e5f0 100644 (file)
@@ -246,7 +246,7 @@ static void emit_prologue(u8 **pprog)
  *     goto out;
  *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
  *     goto out;
- *   prog = array->prog[index];
+ *   prog = array->ptrs[index];
  *   if (prog == NULL)
  *     goto out;
  *   goto *(prog->bpf_func + prologue_size);
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog)
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
 
-       /* prog = array->prog[index]; */
+       /* prog = array->ptrs[index]; */
        EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
-                   offsetof(struct bpf_array, prog));
+                   offsetof(struct bpf_array, ptrs));
        EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
 
        /* if (prog == NULL)
index cfba30f273921b302d03883be593d77d6a3f1fe7..e4308fe6afe81e4d8be5a42a6cc682174761fe1f 100644 (file)
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
 
 static int __init arch_parse_efi_cmdline(char *str)
 {
+       if (!str) {
+               pr_warn("need at least one option\n");
+               return -EINVAL;
+       }
+
        if (parse_option_str(str, "old_map"))
                set_bit(EFI_OLD_MEMMAP, &efi.flags);
        if (parse_option_str(str, "debug"))
index 0d7dd1f5ac36fa6814c18522dd28561566c570eb..9ab52791fed59e3ab2e531611037c4ed9dc7bde3 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
        syscall_init();                         /* This sets MSR_*STAR and related */
 #endif
        load_TR_desc();                         /* This does ltr */
-       load_LDT(&current->active_mm->context); /* This does lldt */
+       load_mm_ldt(current->active_mm);        /* This does lldt */
 
        fpu__resume_cpu();
 }
index e88fda867a33b198bc356aded57d59f48fcfb4ee..484145368a241207d8aa80a5f758a7d0f3ef54cb 100644 (file)
@@ -8,7 +8,7 @@ config XEN
        select PARAVIRT_CLOCK
        select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE)
-       depends on X86_TSC
+       depends on X86_LOCAL_APIC && X86_TSC
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
        def_bool y
        depends on XEN && PCI_XEN && SWIOTLB_XEN
-       depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+       depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
        def_bool y
index 7322755f337af760db6086450591c584c4dcda77..4b6e29ac0968c1a76451d3ff773652bc4afed138 100644 (file)
@@ -13,13 +13,13 @@ CFLAGS_mmu.o                        := $(nostackp)
 obj-y          := enlighten.o setup.o multicalls.o mmu.o irq.o \
                        time.o xen-asm.o xen-asm_$(BITS).o \
                        grant-table.o suspend.o platform-pci-unplug.o \
-                       p2m.o
+                       p2m.o apic.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
-obj-$(CONFIG_XEN_DOM0)         += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0)         += vga.o
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 obj-$(CONFIG_XEN_EFI)          += efi.o
index 0b95c9b8283fe2afe885d9a8ae98393c14ecc498..11d6fb4e8483d529f833cbc1276ea4ffd3c68102 100644 (file)
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
        pte_t pte;
        unsigned long pfn;
        struct page *page;
+       unsigned char dummy;
 
        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
        pte = pfn_pte(pfn, prot);
 
+       /*
+        * Careful: update_va_mapping() will fail if the virtual address
+        * we're poking isn't populated in the page tables.  We don't
+        * need to worry about the direct map (that's always in the page
+        * tables), but we need to be careful about vmap space.  In
+        * particular, the top level page table can lazily propagate
+        * entries between processes, so if we've switched mms since we
+        * vmapped the target in the first place, we might not have the
+        * top-level page table entry populated.
+        *
+        * We disable preemption because we want the same mm active when
+        * we probe the target and when we issue the hypercall.  We'll
+        * have the same nominal mm, but if we're a kernel thread, lazy
+        * mm dropping could change our pgd.
+        *
+        * Out of an abundance of caution, this uses __get_user() to fault
+        * in the target address just in case there's some obscure case
+        * in which the target address isn't readable.
+        */
+
+       preempt_disable();
+
+       pagefault_disable();    /* Avoid warnings due to being atomic. */
+       __get_user(dummy, (unsigned char __user __force *)v);
+       pagefault_enable();
+
        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();
 
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
                                BUG();
        } else
                kmap_flush_unused();
+
+       preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;
 
+       /*
+        * We need to mark all aliases of the LDT pages RO.  We
+        * don't need to call vm_flush_aliases(), though, since that's
+        * only responsible for flushing aliases out of the TLBs, not the
+        * page tables, and Xen will flush the TLB for us if needed.
+        *
+        * To avoid confusing future readers: none of this is necessary
+        * to load the LDT.  The hypervisor only checks this when the
+        * LDT is faulted in due to subsequent descriptor access.
+        */
+
        for(i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
index c20fe29e65f48b4706789e0ad59bb33b1a3acc18..2292721b1d103844ade9f8a7f436a649c811f77d 100644 (file)
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
 
 #ifdef CONFIG_XEN_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
                                       size_t size)
 {
 }
-static inline void __init xen_init_apic(void)
-{
-}
 #endif
 
+void __init xen_init_apic(void);
+
 #ifdef CONFIG_XEN_EFI
 extern void xen_efi_init(void);
 #else
index 12600bfffca93f4547e2325eeda9669ff443a7a7..e0057d035200c4dd5e42d191f0395a7769489905 100644 (file)
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
index a3da6770bc9ed2bf66d59e8e74461829eeb4fe4e..b8efe36ce1142d0c6b0b8e45ec23965ec7135c40 100644 (file)
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        unsigned int cryptlen = req->cryptlen;
        struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = dst;
 
        areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        struct page *srcp;
        u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = src;
 
        areq_ctx->complete = authenc_esn_verify_ahash_done;
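
Both hunks hard-code the same assumption: the associated data is one
contiguous 12-byte block, with bytes 4-7 hashed as the trailer, matching
ESN's rule of appending the high sequence-number half after the payload. A
sketch of that assumed layout (field names inferred from ESP/ESN usage, not
taken from this patch):

	struct esn_assoc {			/* __be32 from linux/types.h */
		__be32 spi;	/* bytes 0-3:  head, hsg[0] */
		__be32 seq_hi;	/* bytes 4-7:  trailer, tsg */
		__be32 seq_lo;	/* bytes 8-11: head, hsg[1] */
	};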
index 815f75ef24119eab28ce3c0c2047295c6e464c58..2922f1f252d58aafd2d6c233404ae7ca21abb524 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        { },
 };
 
+/* This uses a workqueue to avoid various lock-ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+       if (acpi_video_get_backlight_type() != acpi_backlight_video)
+               acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
                                       unsigned long val, void *bd)
 {
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
        /* A raw bl registering may change video -> native */
        if (backlight->props.type == BACKLIGHT_RAW &&
-           val == BACKLIGHT_REGISTERED &&
-           acpi_video_get_backlight_type() != acpi_backlight_video)
-               acpi_video_unregister_backlight();
+           val == BACKLIGHT_REGISTERED)
+               schedule_work(&backlight_notify_work);
 
        return NOTIFY_OK;
 }
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                    ACPI_UINT32_MAX, find_video, NULL,
                                    &video_caps, NULL);
+               INIT_WORK(&backlight_notify_work,
+                         acpi_video_backlight_notify_work);
                backlight_nb.notifier_call = acpi_video_backlight_notify;
                backlight_nb.priority = 0;
                if (backlight_register_notifier(&backlight_nb) == 0)
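
Reduced to its generic shape (names below are illustrative, not from the
driver): the notifier callback only schedules a work item, and the real
unregistration runs later in process context, where taking the backlight
locks cannot deadlock against the notifier chain:

	static struct work_struct deferred_work;	/* INIT_WORK() at setup */

	static void deferred_fn(struct work_struct *work)
	{
		/* safe here to take locks the notifier context must avoid */
	}

	static int example_notify(struct notifier_block *nb,
				  unsigned long val, void *data)
	{
		schedule_work(&deferred_work);
		return NOTIFY_OK;
	}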
index ce1e3a8859815ca5724e376de6d6ab0d549c9831..14b7305d2ba0b3cc24aa101e76c87e242f01f537 100644 (file)
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
        /* See brcm_sata_readreg() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
                           priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
        brcm_sata_phys_enable(priv);
        return ahci_platform_resume(dev);
 }
+#endif
 
 static struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT(DRV_NAME),
index db5d9f79a247c5ceb2cb590f206927c22f6f2b7c..19bcb80b20313932021b1ee613eed97f4473e17e 100644 (file)
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  *     RETURNS:
  *     Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
        u64 block = 0;
 
-       if (!dev || tf->flags & ATA_TFLAG_LBA) {
+       if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
        return 0;
 }
 
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-       unsigned int err_mask;
-
-       if (!ata_id_has_sense_reporting(dev->id))
-               return;
-
-       if (ata_id_sense_reporting_enabled(dev->id))
-               return;
-
-       err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to enable Sense Data Reporting, Emask 0x%x\n",
-                           err_mask);
-       }
-}
-
 /**
  *     ata_dev_configure - Configure the specified ATA/ATAPI device
  *     @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->devslp_timing[i] = sata_setting[j];
                                }
                }
-               ata_dev_config_sense_reporting(dev);
+
                dev->cdb_len = 16;
        }
 
index 7465031a893c60c9e61f2c911abf218b39c81d2e..cb0508af1459ac43f4aa26f1a16d94134bd9d0bc 100644 (file)
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
-       if (ata_id_has_ncq_autosense(dev->id))
-               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
        return 0;
 }
@@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
        return err_mask;
 }
 
-/**
- *     ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *     @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *     @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *     @dfl_sense_key: default sense key to use
- *
- *     Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *     SENSE.  This function is EH helper.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     encoded sense data on success, 0 on failure or if sense data
- *     is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-                               struct scsi_cmnd *cmd)
-{
-       struct ata_device *dev = qc->dev;
-       struct ata_taskfile tf;
-       unsigned int err_mask;
-
-       if (!cmd)
-               return 0;
-
-       DPRINTK("ATA request sense\n");
-       ata_dev_warn(dev, "request sense\n");
-       if (!ata_id_sense_reporting_enabled(dev->id)) {
-               ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-               return 0;
-       }
-       ata_tf_init(dev, &tf);
-
-       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-       tf.command = ATA_CMD_REQ_SENSE_DATA;
-       tf.protocol = ATA_PROT_NODATA;
-
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-       /*
-        * ACS-4 states:
-        * The device may set the SENSE DATA AVAILABLE bit to one in the
-        * STATUS field and clear the ERROR bit to zero in the STATUS field
-        * to indicate that the command returned completion without an error
-        * and the sense data described in table 306 is available.
-        *
-        * IOW the 'ATA_SENSE' bit might not be set even though valid
-        * sense data is available.
-        * So check for both.
-        */
-       if ((tf.command & ATA_SENSE) ||
-               tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-               ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-               ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-                            tf.lbah, tf.lbam, tf.lbal);
-       } else {
-               ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-                            tf.command, err_mask);
-       }
-       return err_mask;
-}
-
 /**
  *     atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *     @dev: device to perform REQUEST_SENSE to
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if (qc->result_tf.auxiliary) {
-               char sense_key, asc, ascq;
-
-               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-               ascq = qc->result_tf.auxiliary & 0xff;
-               ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-                           sense_key, asc, ascq);
-               ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-               ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-       }
-
        ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                return ATA_EH_RESET;
        }
 
-       /*
-        * Sense data reporting does not work if the
-        * device fault bit is set.
-        */
-       if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-           !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-               if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-                       tmp = ata_eh_request_sense(qc, qc->scsicmd);
-                       if (tmp)
-                               qc->err_mask |= tmp;
-                       else
-                               ata_scsi_set_sense_information(qc->scsicmd, tf);
-               } else {
-                       ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-               }
-       }
-
-       /* Set by NCQ autosense or request sense above */
-       if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-               return 0;
-
        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-                                   ATA_SENSE | ATA_ERR)) {
+                                   ATA_ERR)) {
                        if (res->command & ATA_BUSY)
                                ata_dev_err(qc->dev, "status: { Busy }\n");
                        else
-                               ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+                               ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
                                  res->command & ATA_DRDY ? "DRDY " : "",
                                  res->command & ATA_DF ? "DF " : "",
                                  res->command & ATA_DRQ ? "DRQ " : "",
-                                 res->command & ATA_SENSE ? "SENSE " : "",
                                  res->command & ATA_ERR ? "ERR " : "");
                }
 
index 641a61a59e89c00036af65d3a31fe2cf67eb22b8..0d7f0da3a26929622080f94a2a3125c63676999e 100644 (file)
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-       if (!cmd)
-               return;
-
        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
        scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                   const struct ata_taskfile *tf)
-{
-       u64 information;
-
-       if (!cmd)
-               return;
-
-       information = ata_tf_read_block(tf, NULL);
-       scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
            ((cdb[2] & 0x20) || need_sense)) {
                ata_gen_passthru_sense(qc);
        } else {
-               if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-                       cmd->result = SAM_STAT_CHECK_CONDITION;
-               } else if (!need_sense) {
+               if (!need_sense) {
                        cmd->result = SAM_STAT_GOOD;
                } else {
                        /* TODO: decide which descriptor format to use
index a998a175f9f144b50e4df782bbf7d1afd5f506cb..f840ca18a7c014f5151d22e4bc55dff9fca459de 100644 (file)
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                           u64 block, u32 n_block, unsigned int tf_flags,
                           unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
-                            struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
                                  struct ata_taskfile *tf, const u8 *cdb,
                                  int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
                              struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                          const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
index 3a18a8a719b4ff1fa562a515b4701da241e4aeb7..fab504fd9cfd7ace54d772927a01650373d02206 100644 (file)
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
        readl(mmio + PDC_SDRAM_CONTROL);
 
        /* Turn on for ECC */
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                data |= (0x01 << 16);
                writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 
        /* ECC initialization. */
 
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                void *buf;
                VPRINTK("Start ECC initialization\n");
index f3f6d167f3f1f015fec8e36ede66b7e8e274338f..4c20828993222164251e6294f1319572bcdc3e89 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
 
 /**
  * device_add_property_set - Add a collection of properties to a device object.
@@ -533,3 +535,79 @@ bool device_dma_is_coherent(struct device *dev)
        return coherent;
 }
 EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev:       Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or an errno on failure.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+       const char *pm;
+       int err, i;
+
+       err = device_property_read_string(dev, "phy-mode", &pm);
+       if (err < 0)
+               err = device_property_read_string(dev,
+                                                 "phy-connection-type", &pm);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+               if (!strcasecmp(pm, phy_modes(i)))
+                       return i;
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+static void *device_get_mac_addr(struct device *dev,
+                                const char *name, char *addr,
+                                int alen)
+{
+       int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+       if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+               return addr;
+       return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:       Pointer to the device
+ * @addr:      Address of buffer to store the MAC in
+ * @alen:      Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use.  'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address.  If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware.  For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+       addr = device_get_mac_addr(dev, "mac-address", addr, alen);
+       if (addr)
+               return addr;
+
+       addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+       if (addr)
+               return addr;
+
+       return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
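
A hypothetical probe-time sketch of the two new helpers (the driver shape
and the random-MAC fallback are assumptions, not part of this patch):

	static int example_eth_probe(struct platform_device *pdev)
	{
		u8 mac[ETH_ALEN];
		int phy_mode = device_get_phy_mode(&pdev->dev);

		if (phy_mode < 0)
			return phy_mode;	/* no usable phy-mode property */

		if (!device_get_mac_address(&pdev->dev, (char *)mac, ETH_ALEN))
			eth_random_addr(mac);	/* assumed fallback policy */

		/* ... hand 'mac' and 'phy_mode' to the MAC core ... */
		return 0;
	}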
index 81751a49d8bf2334612350bba52406b9af352258..56486d92c4e72bd583630baea0fba541f36c926f 100644 (file)
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        if (!blk)
                return -ENOMEM;
 
-       present = krealloc(rbnode->cache_present,
-                   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
-       if (!present) {
-               kfree(blk);
-               return -ENOMEM;
+       if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+               present = krealloc(rbnode->cache_present,
+                                  BITS_TO_LONGS(blklen) * sizeof(*present),
+                                  GFP_KERNEL);
+               if (!present) {
+                       kfree(blk);
+                       return -ENOMEM;
+               }
+
+               memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+                      (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+                      * sizeof(*present));
+       } else {
+               present = rbnode->cache_present;
        }
 
        /* insert the register value in the correct place in the rbnode block */
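
The bug this fixes: the old code krealloc()ed the presence bitmap on every
insert but never zeroed the newly added longs, so a grown bitmap could
report stale "present" bits. A standalone user-space analogue of the
corrected grow-and-zero idiom (sketch only, not driver code):

	#include <stdlib.h>
	#include <string.h>

	/* Grow a bitmap from old_longs to new_longs, zeroing the tail. */
	static unsigned long *grow_bitmap(unsigned long *map,
					  size_t old_longs, size_t new_longs)
	{
		unsigned long *p;

		if (new_longs <= old_longs)
			return map;	/* no growth needed, reuse buffer */

		p = realloc(map, new_longs * sizeof(*p));
		if (!p)
			return NULL;	/* caller still owns 'map' */

		memset(p + old_longs, 0,
		       (new_longs - old_longs) * sizeof(*p));
		return p;
	}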
index be5fffb6da2480845d14f920bd012db184887734..023d448ed3fa6047549d635fade7fb26205a8633 100644 (file)
@@ -92,7 +92,7 @@ config BCMA_DRIVER_GMAC_CMN
 config BCMA_DRIVER_GPIO
        bool "BCMA GPIO driver"
        depends on BCMA && GPIOLIB
-       select IRQ_DOMAIN if BCMA_HOST_SOC
+       select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
        help
          Driver to provide access to the GPIO pins of the bcma bus.
 
index 5f6018e7cd4c42c5b4e2f7670c28e05a2bc21eb5..504899a7296649cecf922065e29282189f6d3d3b 100644 (file)
@@ -8,10 +8,8 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
-#include <linux/gpio.h>
-#include <linux/irq.h>
+#include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
-#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 
@@ -79,19 +77,11 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
 }
 
 #if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
-       struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
-
-       if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-               return irq_find_mapping(cc->irq_domain, gpio);
-       else
-               return -EINVAL;
-}
 
 static void bcma_gpio_irq_unmask(struct irq_data *d)
 {
-       struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
        int gpio = irqd_to_hwirq(d);
        u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
 
@@ -101,7 +91,8 @@ static void bcma_gpio_irq_unmask(struct irq_data *d)
 
 static void bcma_gpio_irq_mask(struct irq_data *d)
 {
-       struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
        int gpio = irqd_to_hwirq(d);
 
        bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
@@ -116,6 +107,7 @@ static struct irq_chip bcma_gpio_irq_chip = {
 static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
 {
        struct bcma_drv_cc *cc = dev_id;
+       struct gpio_chip *gc = &cc->gpio;
        u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
        u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
        u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
@@ -125,81 +117,58 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
        if (!irqs)
                return IRQ_NONE;
 
-       for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
-               generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+       for_each_set_bit(gpio, &irqs, gc->ngpio)
+               generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
        bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
 
        return IRQ_HANDLED;
 }
 
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
        struct gpio_chip *chip = &cc->gpio;
-       int gpio, hwirq, err;
+       int hwirq, err;
 
        if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
                return 0;
 
-       cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
-                                              &irq_domain_simple_ops, cc);
-       if (!cc->irq_domain) {
-               err = -ENODEV;
-               goto err_irq_domain;
-       }
-       for (gpio = 0; gpio < chip->ngpio; gpio++) {
-               int irq = irq_create_mapping(cc->irq_domain, gpio);
-
-               irq_set_chip_data(irq, cc);
-               irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
-                                        handle_simple_irq);
-       }
-
        hwirq = bcma_core_irq(cc->core, 0);
        err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
                          cc);
        if (err)
-               goto err_req_irq;
+               return err;
 
        bcma_chipco_gpio_intmask(cc, ~0, 0);
        bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
 
-       return 0;
-
-err_req_irq:
-       for (gpio = 0; gpio < chip->ngpio; gpio++) {
-               int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-               irq_dispose_mapping(irq);
+       err = gpiochip_irqchip_add(chip, &bcma_gpio_irq_chip, 0,
+                                  handle_simple_irq, IRQ_TYPE_NONE);
+       if (err) {
+               free_irq(hwirq, cc);
+               return err;
        }
-       irq_domain_remove(cc->irq_domain);
-err_irq_domain:
-       return err;
+
+       return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
-       struct gpio_chip *chip = &cc->gpio;
-       int gpio;
-
        if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
                return;
 
        bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
        free_irq(bcma_core_irq(cc->core, 0), cc);
-       for (gpio = 0; gpio < chip->ngpio; gpio++) {
-               int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-               irq_dispose_mapping(irq);
-       }
-       irq_domain_remove(cc->irq_domain);
 }
 #else
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
        return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
 }
 #endif
@@ -218,9 +187,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        chip->set               = bcma_gpio_set_value;
        chip->direction_input   = bcma_gpio_direction_input;
        chip->direction_output  = bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-       chip->to_irq            = bcma_gpio_to_irq;
-#endif
+       chip->owner             = THIS_MODULE;
+       chip->dev               = bcma_bus_get_host_dev(bus);
 #if IS_BUILTIN(CONFIG_OF)
        if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
                chip->of_node   = cc->core->dev.of_node;
@@ -248,13 +216,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        else
                chip->base              = -1;
 
-       err = bcma_gpio_irq_domain_init(cc);
+       err = gpiochip_add(chip);
        if (err)
                return err;
 
-       err = gpiochip_add(chip);
+       err = bcma_gpio_irq_init(cc);
        if (err) {
-               bcma_gpio_irq_domain_exit(cc);
+               gpiochip_remove(chip);
                return err;
        }
 
@@ -263,7 +231,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
 int bcma_gpio_unregister(struct bcma_drv_cc *cc)
 {
-       bcma_gpio_irq_domain_exit(cc);
+       bcma_gpio_irq_exit(cc);
        gpiochip_remove(&cc->gpio);
        return 0;
 }
index d94529d5c8e951378eaf62d74b708edf271a550f..bc67a93aa4f4749f10d1a219789b21661c01ee21 100644 (file)
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)     ((void) 0)
 #endif /* !RBD_DEBUG */
 
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
        obj_request_done_set(obj_request);
 }
 
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p\n", __func__, obj_request);
+
+       if (obj_request_img_data_test(obj_request))
+               rbd_osd_copyup_callback(obj_request);
+       else
+               obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
 {
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                rbd_osd_discard_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
+               rbd_osd_call_callback(obj_request);
+               break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
 }
 
 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;
 
+       dout("%s: obj %p\n", __func__, obj_request);
+
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
                obj_request->type == OBJ_REQUEST_NODATA);
        rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
        if (!obj_request->result)
                obj_request->xferred = obj_request->length;
 
-       /* Finish up with the normal image object callback */
-
-       rbd_img_obj_callback(obj_request);
+       obj_request_done_set(obj_request);
 }
 
 static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        /* All set, send it off. */
 
-       orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, orig_request);
        if (!img_result)
index ced96777b677b9bcddd65bae004a7a51b5cf0dc3..954c0029fb3babc49d1a1f490f9d420934701e30 100644 (file)
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
                return;
        }
 
-       if (work_pending(&blkif->persistent_purge_work)) {
-               pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+       if (work_busy(&blkif->persistent_purge_work)) {
+               pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                return;
        }
 
index 6d89ed35d80c0caaf8bf57ba82c7e9f3a9194bb9..7a8a73f1fc0462feab5bad706573ff6eb4536ef7 100644 (file)
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
        ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
 static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_info *info)
 {
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                                 * Add the used indirect page back to the list of
                                 * available pages for indirect grefs.
                                 */
-                               indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
-                               list_add(&indirect_page->lru, &info->indirect_pages);
+                               if (!info->feature_persistent) {
+                                       indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+                                       list_add(&indirect_page->lru, &info->indirect_pages);
+                               }
                                s->indirect_grants[i]->gref = GRANT_INVALID_REF;
                                list_add_tail(&s->indirect_grants[i]->node, &info->grants);
                        }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
        info->shadow_free = info->ring.req_prod_pvt;
        info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
 
-       rc = blkfront_setup_indirect(info);
+       rc = blkfront_gather_backend_features(info);
        if (rc) {
                kfree(copy);
                return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 
 static int blkfront_setup_indirect(struct blkfront_info *info)
 {
-       unsigned int indirect_segments, segs;
+       unsigned int segs;
        int err, i;
 
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-max-indirect-segments", "%u", &indirect_segments,
-                           NULL);
-       if (err) {
-               info->max_indirect_segments = 0;
+       if (info->max_indirect_segments == 0)
                segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
-       } else {
-               info->max_indirect_segments = min(indirect_segments,
-                                                 xen_blkif_max_segments);
+       else
                segs = info->max_indirect_segments;
-       }
 
        err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
        if (err)
@@ -1796,6 +1792,68 @@ out_of_memory:
        return -ENOMEM;
 }
 
+/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+       int err;
+       int barrier, flush, discard, persistent;
+       unsigned int indirect_segments;
+
+       info->feature_flush = 0;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-barrier", "%d", &barrier,
+                       NULL);
+
+       /*
+        * If there's no "feature-barrier" defined, then it means
+        * we're dealing with a very old backend which writes
+        * synchronously; nothing to do.
+        *
+        * If there are barriers, then we use flush.
+        */
+       if (!err && barrier)
+               info->feature_flush = REQ_FLUSH | REQ_FUA;
+       /*
+        * And if there is "feature-flush-cache" use that above
+        * barriers.
+        */
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-flush-cache", "%d", &flush,
+                       NULL);
+
+       if (!err && flush)
+               info->feature_flush = REQ_FLUSH;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-discard", "%d", &discard,
+                       NULL);
+
+       if (!err && discard)
+               blkfront_setup_discard(info);
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-persistent", "%u", &persistent,
+                       NULL);
+       if (err)
+               info->feature_persistent = 0;
+       else
+               info->feature_persistent = persistent;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                           "feature-max-indirect-segments", "%u", &indirect_segments,
+                           NULL);
+       if (err)
+               info->max_indirect_segments = 0;
+       else
+               info->max_indirect_segments = min(indirect_segments,
+                                                 xen_blkif_max_segments);
+
+       return blkfront_setup_indirect(info);
+}
+
 /*
  * Invoked when the backend is finally 'ready' (and has produced
  * the details about the physical device - #sectors, size, etc).
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
        unsigned int physical_sector_size;
        unsigned int binfo;
        int err;
-       int barrier, flush, discard, persistent;
 
        switch (info->connected) {
        case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
        if (err != 1)
                physical_sector_size = sector_size;
 
-       info->feature_flush = 0;
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-barrier", "%d", &barrier,
-                           NULL);
-
-       /*
-        * If there's no "feature-barrier" defined, then it means
-        * we're dealing with a very old backend which writes
-        * synchronously; nothing to do.
-        *
-        * If there are barriers, then we use flush.
-        */
-       if (!err && barrier)
-               info->feature_flush = REQ_FLUSH | REQ_FUA;
-       /*
-        * And if there is "feature-flush-cache" use that above
-        * barriers.
-        */
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-flush-cache", "%d", &flush,
-                           NULL);
-
-       if (!err && flush)
-               info->feature_flush = REQ_FLUSH;
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-discard", "%d", &discard,
-                           NULL);
-
-       if (!err && discard)
-               blkfront_setup_discard(info);
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-persistent", "%u", &persistent,
-                           NULL);
-       if (err)
-               info->feature_persistent = 0;
-       else
-               info->feature_persistent = persistent;
-
-       err = blkfront_setup_indirect(info);
+       err = blkfront_gather_backend_features(info);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
                                 info->xbdev->otherend);
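
The refactor above funnels every optional xenstore "feature-*" key through
blkfront_gather_backend_features(), with each read falling back to a safe
default when the key is absent, before handing off to
blkfront_setup_indirect(). A minimal user-space sketch of that
probe-with-default pattern follows; read_backend_key() is a made-up
stand-in for xenbus_gather(), and the min() clamp against
xen_blkif_max_segments is omitted.

/* Sketch (not kernel code) of the probe-with-default pattern used by
 * blkfront_gather_backend_features(): each optional backend key is read
 * independently, and a failed read simply selects a safe default.
 */
#include <stdio.h>
#include <string.h>

struct features { int flush; int discard; int persistent; unsigned max_indirect; };

/* Hypothetical xenstore lookup: 0 on success, -1 if the key is absent. */
static int read_backend_key(const char *key, unsigned *out)
{
        static const struct { const char *k; unsigned v; } store[] = {
                { "feature-flush-cache", 1 },
                { "feature-max-indirect-segments", 256 },
        };
        for (size_t i = 0; i < sizeof(store) / sizeof(store[0]); i++)
                if (!strcmp(store[i].k, key)) { *out = store[i].v; return 0; }
        return -1;
}

int main(void)
{
        struct features f = { 0, 0, 0, 0 };
        unsigned v;

        if (!read_backend_key("feature-flush-cache", &v) && v)
                f.flush = 1;
        if (!read_backend_key("feature-discard", &v) && v)
                f.discard = 1;
        /* An absent key means the feature is off, mirroring the err checks. */
        f.persistent = read_backend_key("feature-persistent", &v) ? 0 : (int)v;
        f.max_indirect = read_backend_key("feature-max-indirect-segments", &v) ? 0 : v;

        printf("flush=%d discard=%d persistent=%d max_indirect=%u\n",
               f.flush, f.discard, f.persistent, f.max_indirect);
        return 0;
}

Gathering the features in one place also means that a reconnect after
migration (blkif_recover(), changed above) re-evaluates them against the
new backend instead of reusing stale values.
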
index fb655e8d1e3b17bf4cda9fd09593bc7dc770f78d..763301c7828c72650f2abaa1c723425bdd3c73f4 100644 (file)
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
        kfree(meta);
 }
 
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
        size_t num_pages;
-       char pool_name[8];
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
 
        if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
                goto out_error;
        }
 
-       snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
                return -EINVAL;
 
        disksize = PAGE_ALIGN(disksize);
-       meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+       meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;
 
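
Passing zram->disk->disk_name into zram_meta_alloc() removes the local
pool_name[8] buffer entirely. One hazard of that fixed-size buffer, shown
as a standalone sketch (an illustration of the buffer's limits, not
necessarily the commit's stated motivation): snprintf() truncates
"zram%d" silently once the id needs four digits, so a hypothetical
zram1000 would end up with the same pool name as zram100.

/* Demonstrates silent snprintf() truncation in an 8-byte name buffer. */
#include <stdio.h>

int main(void)
{
        char pool_name[8];
        int ids[] = { 0, 99, 100, 1000 };

        for (int i = 0; i < 4; i++) {
                /* snprintf() returns the length it wanted to write */
                int n = snprintf(pool_name, sizeof(pool_name), "zram%d", ids[i]);
                printf("id=%-5d -> \"%s\" (%s)\n", ids[i], pool_name,
                       n < (int)sizeof(pool_name) ? "ok" : "truncated");
        }
        return 0;
}
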
index 79e8234b1aa5995eaf8b8e25d729e6da193518fe..0bd88c942a5202845960ddbf62b35e9d1aa9e9d2 100644 (file)
@@ -13,6 +13,10 @@ config BT_RTL
        tristate
        select FW_LOADER
 
+config BT_QCA
+       tristate
+       select FW_LOADER
+
 config BT_HCIBTUSB
        tristate "HCI USB driver"
        depends on USB
@@ -151,6 +155,19 @@ config BT_HCIUART_BCM
 
          Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIUART_QCA
+       bool "Qualcomm Atheros protocol support"
+       depends on BT_HCIUART
+       select BT_HCIUART_H4
+       select BT_QCA
+       help
+         The Qualcomm Atheros protocol supports the HCI In-Band Sleep
+         feature over the serial port interface (H4) between controller
+         and host. This protocol is required for UART clock control on
+         QCA Bluetooth devices.
+
+         Say Y here to compile support for QCA protocol.
+
 config BT_HCIBCM203X
        tristate "HCI BCM203x USB driver"
        depends on USB
index f40e194e7080183e999ebb5ac8381130cb7a4b54..07c9cf381e5aeb2585a18beb0f9aa98a75862e9e 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_BT_MRVL_SDIO)    += btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)                += btwilink.o
 obj-$(CONFIG_BT_BCM)           += btbcm.o
 obj-$(CONFIG_BT_RTL)           += btrtl.o
+obj-$(CONFIG_BT_QCA)           += btqca.o
 
 btmrvl-y                       := btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)      += btmrvl_debugfs.o
@@ -34,6 +35,7 @@ hci_uart-$(CONFIG_BT_HCIUART_ATH3K)   += hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)    += hci_h5.o
 hci_uart-$(CONFIG_BT_HCIUART_INTEL)    += hci_intel.o
 hci_uart-$(CONFIG_BT_HCIUART_BCM)      += hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA)      += hci_qca.o
 hci_uart-objs                          := $(hci_uart-y)
 
 ccflags-y += -D__CHECK_ENDIAN__
index b9a811900f6ab534087e17f3726c840a8afdb34e..7c097629e59312e232757aa8b5f67f4130b71d3e 100644 (file)
@@ -1071,8 +1071,6 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
                }
        }
 
-       sdio_release_host(card->func);
-
        /*
         * winner or not, with this test the FW synchronizes when the
         * module can continue its initialization
@@ -1082,6 +1080,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
                return -ETIMEDOUT;
        }
 
+       sdio_release_host(card->func);
+
        return 0;
 
 done:
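
The btmrvl hunk above moves sdio_release_host() past the firmware-ready
check, so the poll runs while the host is still claimed and the claim is
dropped only after the poll has decided. A toy sketch of that
acquire/poll/release ordering; claim_host(), poll_fw_ready() and the
unwind on the error path are illustrative stand-ins, not the real SDIO
API or the driver's exact error handling.

/* Toy model: the readiness poll must run under the host claim. */
#include <stdio.h>
#include <errno.h>

static int host_claimed;

static void claim_host(void)   { host_claimed = 1; }
static void release_host(void) { host_claimed = 0; }

/* Pretend poll that only succeeds while the host is claimed. */
static int poll_fw_ready(void)
{
        return host_claimed ? 0 : -EIO;
}

static int download_fw(void)
{
        claim_host();
        if (poll_fw_ready() < 0) {
                release_host();         /* illustrative unwind */
                return -ETIMEDOUT;
        }
        release_host();                 /* success: release after the poll */
        return 0;
}

int main(void)
{
        printf("download_fw() = %d\n", download_fw());
        return 0;
}
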
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
new file mode 100644 (file)
index 0000000..4a62081
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ *  Bluetooth support for Qualcomm Atheros chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+       struct sk_buff *skb;
+       struct edl_event_hdr *edl;
+       struct rome_version *ver;
+       char cmd;
+       int err = 0;
+
+       BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+       cmd = EDL_PATCH_VER_REQ_CMD;
+       skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+                               &cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+                      err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+               BT_ERR("%s: Version size mismatch len %d", hdev->name,
+                      skb->len);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       edl = (struct edl_event_hdr *)(skb->data);
+       if (!edl || !edl->data) {
+               BT_ERR("%s: TLV with no header or no data", hdev->name);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+           edl->rtype != EDL_APP_VER_RES_EVT) {
+               BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+                      edl->cresp, edl->rtype);
+               err = -EIO;
+               goto out;
+       }
+
+       ver = (struct rome_version *)(edl->data);
+
+       BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+       BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+       BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+       BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+       /* The ROME chipset version is derived from the patch and SoC
+        * versions: the upper 2 bytes come from the SoC version and
+        * the lower 2 bytes from the patch version.
+        */
+       *rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+                       (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+
+out:
+       kfree_skb(skb);
+
+       return err;
+}
+
+static int rome_reset(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       int err;
+
+       BT_DBG("%s: ROME HCI_RESET", hdev->name);
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+               return err;
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+
+static void rome_tlv_check_data(struct rome_config *config,
+                               const struct firmware *fw)
+{
+       const u8 *data;
+       u32 type_len;
+       u16 tag_id, tag_len;
+       int idx, length;
+       struct tlv_type_hdr *tlv;
+       struct tlv_type_patch *tlv_patch;
+       struct tlv_type_nvm *tlv_nvm;
+
+       tlv = (struct tlv_type_hdr *)fw->data;
+
+       type_len = le32_to_cpu(tlv->type_len);
+       length = (type_len >> 8) & 0x00ffffff;
+
+       BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+       BT_DBG("Length\t\t : %d bytes", length);
+
+       switch (config->type) {
+       case TLV_TYPE_PATCH:
+               tlv_patch = (struct tlv_type_patch *)tlv->data;
+               BT_DBG("Total Length\t\t : %d bytes",
+                      le32_to_cpu(tlv_patch->total_size));
+               BT_DBG("Patch Data Length\t : %d bytes",
+                      le32_to_cpu(tlv_patch->data_length));
+               BT_DBG("Signing Format Version : 0x%x",
+                      tlv_patch->format_version);
+               BT_DBG("Signature Algorithm\t : 0x%x",
+                      tlv_patch->signature);
+               BT_DBG("Reserved\t\t : 0x%x",
+                      le16_to_cpu(tlv_patch->reserved1));
+               BT_DBG("Product ID\t\t : 0x%04x",
+                      le16_to_cpu(tlv_patch->product_id));
+               BT_DBG("Rom Build Version\t : 0x%04x",
+                      le16_to_cpu(tlv_patch->rom_build));
+               BT_DBG("Patch Version\t\t : 0x%04x",
+                      le16_to_cpu(tlv_patch->patch_version));
+               BT_DBG("Reserved\t\t : 0x%x",
+                      le16_to_cpu(tlv_patch->reserved2));
+               BT_DBG("Patch Entry Address\t : 0x%x",
+                      le32_to_cpu(tlv_patch->entry));
+               break;
+
+       case TLV_TYPE_NVM:
+               idx = 0;
+               data = tlv->data;
+               while (idx < length) {
+                       tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+                       tag_id = le16_to_cpu(tlv_nvm->tag_id);
+                       tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
+                       /* Update NVM tags as needed */
+                       switch (tag_id) {
+                       case EDL_TAG_ID_HCI:
+                               /* HCI transport layer parameters:
+                                * enable software in-band sleep
+                                * on the controller side.
+                                */
+                               tlv_nvm->data[0] |= 0x80;
+
+                               /* UART Baud Rate */
+                               tlv_nvm->data[2] = config->user_baud_rate;
+
+                               break;
+
+                       case EDL_TAG_ID_DEEP_SLEEP:
+                               /* Sleep enable mask:
+                                * enable deep sleep on the controller.
+                                */
+                               tlv_nvm->data[0] |= 0x01;
+
+                               break;
+                       }
+
+                       idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+               }
+               break;
+
+       default:
+               BT_ERR("Unknown TLV type %d", config->type);
+               break;
+       }
+}
+
+static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
+                                const u8 *data)
+{
+       struct sk_buff *skb;
+       struct edl_event_hdr *edl;
+       struct tlv_seg_resp *tlv_resp;
+       u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
+       int err = 0;
+
+       BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
+
+       cmd[0] = EDL_PATCH_TLV_REQ_CMD;
+       cmd[1] = seg_size;
+       memcpy(cmd + 2, data, seg_size);
+
+       skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
+                               HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+               BT_ERR("%s: TLV response size mismatch", hdev->name);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       edl = (struct edl_event_hdr *)(skb->data);
+       if (!edl || !edl->data) {
+               BT_ERR("%s: TLV with no header or no data", hdev->name);
+               err = -EILSEQ;
+               goto out;
+       }
+
+       tlv_resp = (struct tlv_seg_resp *)(edl->data);
+
+       if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+           edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+               BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
+                      hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+               err = -EIO;
+       }
+
+out:
+       kfree_skb(skb);
+
+       return err;
+}
+
+static int rome_tlv_download_request(struct hci_dev *hdev,
+                                    const struct firmware *fw)
+{
+       const u8 *buffer, *data;
+       int total_segment, remain_size;
+       int ret, i;
+
+       if (!fw || !fw->data)
+               return -EINVAL;
+
+       total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
+       remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
+
+       BT_DBG("%s: Total segment num %d remain size %d total size %zu",
+              hdev->name, total_segment, remain_size, fw->size);
+
+       data = fw->data;
+       for (i = 0; i < total_segment; i++) {
+               buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
+               ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
+                                           buffer);
+               if (ret < 0)
+                       return -EIO;
+       }
+
+       if (remain_size) {
+               buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
+               ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
+                                           buffer);
+               if (ret < 0)
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+static int rome_download_firmware(struct hci_dev *hdev,
+                                 struct rome_config *config)
+{
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+
+       ret = request_firmware(&fw, config->fwname, &hdev->dev);
+       if (ret) {
+               BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
+                      config->fwname, ret);
+               return ret;
+       }
+
+       rome_tlv_check_data(config, fw);
+
+       ret = rome_tlv_download_request(hdev, fw);
+       if (ret) {
+               BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
+                      config->fwname, ret);
+       }
+
+       release_firmware(fw);
+
+       return ret;
+}
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       u8 cmd[9];
+       int err;
+
+       cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+       cmd[1] = 0x02;                  /* TAG ID */
+       cmd[2] = sizeof(bdaddr_t);      /* size */
+       memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+       skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
+                               HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Change address command failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+       u32 rome_ver = 0;
+       struct rome_config config;
+       int err;
+
+       BT_DBG("%s: ROME setup on UART", hdev->name);
+
+       config.user_baud_rate = baudrate;
+
+       /* Get ROME version information */
+       err = rome_patch_ver_req(hdev, &rome_ver);
+       if (err < 0 || rome_ver == 0) {
+               BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+               return err;
+       }
+
+       BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+       /* Download rampatch file */
+       config.type = TLV_TYPE_PATCH;
+       snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
+                rome_ver);
+       err = rome_download_firmware(hdev, &config);
+       if (err < 0) {
+               BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+               return err;
+       }
+
+       /* Download NVM configuration */
+       config.type = TLV_TYPE_NVM;
+       snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+                rome_ver);
+       err = rome_download_firmware(hdev, &config);
+       if (err < 0) {
+               BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+               return err;
+       }
+
+       /* Perform HCI reset */
+       err = rome_reset(hdev);
+       if (err < 0) {
+               BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+               return err;
+       }
+
+       BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
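
rome_tlv_download_request() above walks the firmware image in
MAX_SIZE_PER_TLV_SEGMENT (243-byte) chunks and finishes with a single
partial segment for whatever remains. The same arithmetic as a
self-contained sketch; send_segment() is a hypothetical stand-in for
rome_tlv_send_segment().

/* Chunking sketch: fw_size bytes become fw_size/243 full segments
 * plus one trailing partial segment.
 */
#include <stdio.h>

#define MAX_SIZE_PER_TLV_SEGMENT 243

static int send_segment(int idx, int size)
{
        printf("segment #%d: %d bytes\n", idx, size);
        return 0;
}

int main(void)
{
        int fw_size = 1000;     /* example firmware size */
        int total = fw_size / MAX_SIZE_PER_TLV_SEGMENT;
        int remain = fw_size % MAX_SIZE_PER_TLV_SEGMENT;

        for (int i = 0; i < total; i++)
                send_segment(i, MAX_SIZE_PER_TLV_SEGMENT);
        if (remain)
                send_segment(total, remain);    /* trailing partial segment */
        return 0;
}

For a 1000-byte image this yields four full segments and a 28-byte tail,
matching the total_segment/remain_size split computed by the driver.
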
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
new file mode 100644 (file)
index 0000000..65e994b
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ *  Bluetooth support for Qualcomm Atheros ROME chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define EDL_PATCH_CMD_OPCODE           (0xFC00)
+#define EDL_NVM_ACCESS_OPCODE          (0xFC0B)
+#define EDL_PATCH_CMD_LEN              (1)
+#define EDL_PATCH_VER_REQ_CMD          (0x19)
+#define EDL_PATCH_TLV_REQ_CMD          (0x1E)
+#define EDL_NVM_ACCESS_SET_REQ_CMD     (0x01)
+#define MAX_SIZE_PER_TLV_SEGMENT       (243)
+
+#define EDL_CMD_REQ_RES_EVT            (0x00)
+#define EDL_PATCH_VER_RES_EVT          (0x19)
+#define EDL_APP_VER_RES_EVT            (0x02)
+#define EDL_TVL_DNLD_RES_EVT           (0x04)
+#define EDL_CMD_EXE_STATUS_EVT         (0x00)
+#define EDL_SET_BAUDRATE_RSP_EVT       (0x92)
+#define EDL_NVM_ACCESS_CODE_EVT                (0x0B)
+
+#define EDL_TAG_ID_HCI                 (17)
+#define EDL_TAG_ID_DEEP_SLEEP          (27)
+
+enum qca_baudrate {
+       QCA_BAUDRATE_115200     = 0,
+       QCA_BAUDRATE_57600,
+       QCA_BAUDRATE_38400,
+       QCA_BAUDRATE_19200,
+       QCA_BAUDRATE_9600,
+       QCA_BAUDRATE_230400,
+       QCA_BAUDRATE_250000,
+       QCA_BAUDRATE_460800,
+       QCA_BAUDRATE_500000,
+       QCA_BAUDRATE_720000,
+       QCA_BAUDRATE_921600,
+       QCA_BAUDRATE_1000000,
+       QCA_BAUDRATE_1250000,
+       QCA_BAUDRATE_2000000,
+       QCA_BAUDRATE_3000000,
+       QCA_BAUDRATE_4000000,
+       QCA_BAUDRATE_1600000,
+       QCA_BAUDRATE_3200000,
+       QCA_BAUDRATE_3500000,
+       QCA_BAUDRATE_AUTO       = 0xFE,
+       QCA_BAUDRATE_RESERVED
+};
+
+enum rome_tlv_type {
+       TLV_TYPE_PATCH = 1,
+       TLV_TYPE_NVM
+};
+
+struct rome_config {
+       u8 type;
+       char fwname[64];
+       uint8_t user_baud_rate;
+};
+
+struct edl_event_hdr {
+       __u8 cresp;
+       __u8 rtype;
+       __u8 data[0];
+} __packed;
+
+struct rome_version {
+       __le32 product_id;
+       __le16 patch_ver;
+       __le16 rome_ver;
+       __le32 soc_id;
+} __packed;
+
+struct tlv_seg_resp {
+       __u8 result;
+} __packed;
+
+struct tlv_type_patch {
+       __le32 total_size;
+       __le32 data_length;
+       __u8   format_version;
+       __u8   signature;
+       __le16 reserved1;
+       __le16 product_id;
+       __le16 rom_build;
+       __le16 patch_version;
+       __le16 reserved2;
+       __le32 entry;
+} __packed;
+
+struct tlv_type_nvm {
+       __le16 tag_id;
+       __le16 tag_len;
+       __le32 reserve1;
+       __le32 reserve2;
+       __u8   data[0];
+} __packed;
+
+struct tlv_type_hdr {
+       __le32 type_len;
+       __u8   data[0];
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_QCA)
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+
+#else
+
+static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
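
The #if IS_ENABLED(CONFIG_BT_QCA) block above is the usual kernel
stub-header pattern: when the helper module is compiled out, inline stubs
returning -EOPNOTSUPP keep call sites free of #ifdef clutter. A compact
user-space model of the same idea; CONFIG_FOO and foo_setup() are made up
for illustration.

/* Stub-header pattern: the caller is identical whether or not the
 * feature is built; only the return value differs.
 */
#include <stdio.h>
#include <errno.h>

/* #define CONFIG_FOO 1 */      /* toggle to simulate the Kconfig option */

#ifdef CONFIG_FOO
static int foo_setup(int speed) { return speed > 0 ? 0 : -EINVAL; }
#else
static inline int foo_setup(int speed) { (void)speed; return -EOPNOTSUPP; }
#endif

int main(void)
{
        printf("foo_setup(115200) = %d\n", foo_setup(115200));
        return 0;
}
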
index cc92b0f84a5168e139435737cef2c63ab1ee68e6..f759dea7d3baeee54da02784df91417e4632f3f0 100644 (file)
@@ -322,6 +322,9 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
        { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
 
+       /* Silicon Wave based devices */
+       { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
        { }     /* Terminating entry */
 };
 
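
The new blacklist entry slots into a table that is scanned up to its
all-zero terminating entry rather than by an explicit count. A toy model
of that sentinel-terminated match loop; struct toy_id and the match rule
are simplified stand-ins for the USB core's types.

/* Sentinel-terminated ID table: iteration stops at the zeroed entry. */
#include <stdio.h>

struct toy_id { unsigned short vendor, product; unsigned long info; };

#define TOY_SWAVE 0x1UL

static const struct toy_id table[] = {
        { 0x0c10, 0x0000, TOY_SWAVE }, /* Silicon Wave based devices */
        { 0, 0, 0 }                    /* terminating entry */
};

static unsigned long match(unsigned short v, unsigned short p)
{
        const struct toy_id *id;

        for (id = table; id->vendor || id->product || id->info; id++)
                if (id->vendor == v && id->product == p)
                        return id->info;
        return 0;
}

int main(void)
{
        printf("match(0x0c10, 0x0000) = 0x%lx\n", match(0x0c10, 0x0000));
        return 0;
}
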
index 23523e140a9a11ef29e08541782e3d8627dca221..322302b04710f1764fd339247f0ee32aed924743 100644 (file)
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include "btbcm.h"
 #include "hci_uart.h"
 
+struct bcm_device {
+       struct list_head        list;
+
+       struct platform_device  *pdev;
+
+       const char              *name;
+       struct gpio_desc        *device_wakeup;
+       struct gpio_desc        *shutdown;
+
+       struct clk              *clk;
+       bool                    clk_enabled;
+
+       u32                     init_speed;
+
+#ifdef CONFIG_PM_SLEEP
+       struct hci_uart         *hu;
+       bool                    is_suspended; /* suspend/resume flag */
+#endif
+};
+
 struct bcm_data {
-       struct sk_buff *rx_skb;
-       struct sk_buff_head txq;
+       struct sk_buff          *rx_skb;
+       struct sk_buff_head     txq;
+
+       struct bcm_device       *dev;
 };
 
+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_list_lock);
+static LIST_HEAD(bcm_device_list);
+
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 {
        struct hci_dev *hdev = hu->hdev;
@@ -86,9 +118,41 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
        return 0;
 }
 
+/* bcm_device_exists should be protected by bcm_device_list_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+       struct list_head *p;
+
+       list_for_each(p, &bcm_device_list) {
+               struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+               if (device == dev)
+                       return true;
+       }
+
+       return false;
+}
+
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+       if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+               clk_enable(dev->clk);
+
+       gpiod_set_value_cansleep(dev->shutdown, powered);
+       gpiod_set_value_cansleep(dev->device_wakeup, powered);
+
+       if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+               clk_disable(dev->clk);
+
+       dev->clk_enabled = powered;
+
+       return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
        struct bcm_data *bcm;
+       struct list_head *p;
 
        BT_DBG("hu %p", hu);
 
@@ -99,6 +163,30 @@ static int bcm_open(struct hci_uart *hu)
        skb_queue_head_init(&bcm->txq);
 
        hu->priv = bcm;
+
+       spin_lock(&bcm_device_list_lock);
+       list_for_each(p, &bcm_device_list) {
+               struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+               /* Retrieve saved bcm_device based on parent of the
+                * platform device (saved during device probe) and
+                * parent of tty device used by hci_uart
+                */
+               if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+                       bcm->dev = dev;
+                       hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+                       dev->hu = hu;
+#endif
+                       break;
+               }
+       }
+
+       if (bcm->dev)
+               bcm_gpio_set_power(bcm->dev, true);
+
+       spin_unlock(&bcm_device_list_lock);
+
        return 0;
 }
 
@@ -108,6 +196,16 @@ static int bcm_close(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       /* Protect bcm->dev against removal of the device or driver */
+       spin_lock(&bcm_device_list_lock);
+       if (bcm_device_exists(bcm->dev)) {
+               bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+               bcm->dev->hu = NULL;
+#endif
+       }
+       spin_unlock(&bcm_device_list_lock);
+
        skb_queue_purge(&bcm->txq);
        kfree_skb(bcm->rx_skb);
        kfree(bcm);
@@ -232,6 +330,188 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
        return skb_dequeue(&bcm->txq);
 }
 
+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
+static int bcm_suspend(struct device *dev)
+{
+       struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+       BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+       if (!bdev->is_suspended) {
+               hci_uart_set_flow_control(bdev->hu, true);
+
+               /* Once this callback returns, driver suspends BT via GPIO */
+               bdev->is_suspended = true;
+       }
+
+       /* Suspend the device */
+       if (bdev->device_wakeup) {
+               gpiod_set_value(bdev->device_wakeup, false);
+               BT_DBG("suspend, delaying 15 ms");
+               mdelay(15);
+       }
+
+       return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+       struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+       BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+       if (bdev->device_wakeup) {
+               gpiod_set_value(bdev->device_wakeup, true);
+               BT_DBG("resume, delaying 15 ms");
+               mdelay(15);
+       }
+
+       /* When this callback executes, the device has woken up already */
+       if (bdev->is_suspended) {
+               bdev->is_suspended = false;
+
+               hci_uart_set_flow_control(bdev->hu, false);
+       }
+
+       return 0;
+}
+#endif
+
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+       { "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+       { "shutdown-gpios", &shutdown_gpios, 1 },
+       { },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+       struct bcm_device *dev = data;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+               struct acpi_resource_uart_serialbus *sb;
+
+               sb = &ares->data.uart_serial_bus;
+               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+                       dev->init_speed = sb->default_baud_rate;
+       }
+
+       /* Always tell the ACPI core to skip this resource */
+       return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+       struct platform_device *pdev = dev->pdev;
+       const struct acpi_device_id *id;
+       struct acpi_device *adev;
+       LIST_HEAD(resources);
+       int ret;
+
+       id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+       if (!id)
+               return -ENODEV;
+
+       /* Retrieve GPIO data */
+       dev->name = dev_name(&pdev->dev);
+       ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+                                       acpi_bcm_default_gpios);
+       if (ret)
+               return ret;
+
+       dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+       dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+                                                    "device-wakeup",
+                                                    GPIOD_OUT_LOW);
+       if (IS_ERR(dev->device_wakeup))
+               return PTR_ERR(dev->device_wakeup);
+
+       dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+                                               GPIOD_OUT_LOW);
+       if (IS_ERR(dev->shutdown))
+               return PTR_ERR(dev->shutdown);
+
+       /* Make sure at least one of the GPIOs is defined and that
+        * a name is specified for this instance
+        */
+       if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+               dev_err(&pdev->dev, "invalid platform data\n");
+               return -EINVAL;
+       }
+
+       /* Retrieve UART ACPI info */
+       adev = ACPI_COMPANION(&dev->pdev->dev);
+       if (!adev)
+               return 0;
+
+       acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+       return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+       struct bcm_device *dev;
+       struct acpi_device_id *pdata = pdev->dev.platform_data;
+       int ret;
+
+       dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       dev->pdev = pdev;
+
+       if (ACPI_HANDLE(&pdev->dev)) {
+               ret = bcm_acpi_probe(dev);
+               if (ret)
+                       return ret;
+       } else if (pdata) {
+               dev->name = pdata->id;
+       } else {
+               return -ENODEV;
+       }
+
+       platform_set_drvdata(pdev, dev);
+
+       dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+       /* Place this instance on the device list */
+       spin_lock(&bcm_device_list_lock);
+       list_add_tail(&dev->list, &bcm_device_list);
+       spin_unlock(&bcm_device_list_lock);
+
+       bcm_gpio_set_power(dev, false);
+
+       return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+       struct bcm_device *dev = platform_get_drvdata(pdev);
+
+       spin_lock(&bcm_device_list_lock);
+       list_del(&dev->list);
+       spin_unlock(&bcm_device_list_lock);
+
+       acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+       dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+       return 0;
+}
+
 static const struct hci_uart_proto bcm_proto = {
        .id             = HCI_UART_BCM,
        .name           = "BCM",
@@ -247,12 +527,38 @@ static const struct hci_uart_proto bcm_proto = {
        .dequeue        = bcm_dequeue,
 };
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+       { "BCM2E39", 0 },
+       { "BCM2E67", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+       .probe = bcm_probe,
+       .remove = bcm_remove,
+       .driver = {
+               .name = "hci_bcm",
+               .acpi_match_table = ACPI_PTR(bcm_acpi_match),
+               .pm = &bcm_pm_ops,
+       },
+};
+
 int __init bcm_init(void)
 {
+       platform_driver_register(&bcm_driver);
+
        return hci_uart_register_proto(&bcm_proto);
 }
 
 int __exit bcm_deinit(void)
 {
+       platform_driver_unregister(&bcm_driver);
+
        return hci_uart_unregister_proto(&bcm_proto);
 }
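
bcm_gpio_set_power() above only touches the clock when the cached
clk_enabled state disagrees with the requested one, keeping clk_enable()
and clk_disable() calls balanced across repeated requests for the same
power state. A stripped-down model of that guard; clk_on()/clk_off() are
stand-ins for the clk API and the GPIO writes are elided.

/* Guarded power toggle: the cached state prevents double enable/disable. */
#include <stdbool.h>
#include <stdio.h>

struct toy_dev { bool clk_enabled; };

static void clk_on(void)  { puts("clk_enable"); }
static void clk_off(void) { puts("clk_disable"); }

static void set_power(struct toy_dev *dev, bool powered)
{
        if (powered && !dev->clk_enabled)
                clk_on();
        /* the real driver drives the shutdown/wakeup GPIOs here */
        if (!powered && dev->clk_enabled)
                clk_off();
        dev->clk_enabled = powered;
}

int main(void)
{
        struct toy_dev dev = { .clk_enabled = false };

        set_power(&dev, true);  /* enables the clock once */
        set_power(&dev, true);  /* no double enable */
        set_power(&dev, false); /* disables it once */
        return 0;
}
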
index 20c2ac193ff972a9ba8717092f285f81f9aaad59..0d5a05a7c1fd1d46a8b77d71b94f97a9df4190b3 100644 (file)
@@ -810,6 +810,9 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+       qca_init();
+#endif
 
        return 0;
 }
@@ -839,6 +842,9 @@ static void __exit hci_uart_exit(void)
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+       qca_deinit();
+#endif
 
        /* Release tty registration of line discipline */
        err = tty_unregister_ldisc(N_HCI);
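
hci_qca.c, added below, drives transmit-side power management as a
three-state machine: AWAKE sends immediately and re-arms the idle timer,
ASLEEP queues the packet and kicks off the WAKE_IND handshake, and WAKING
just queues until WAKE_ACK arrives. A toy model of that dispatch, without
the driver's locking, timers or workqueues:

/* Three-state HCI_IBS transmit machine, reduced to its dispatch logic. */
#include <stdio.h>

enum tx_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

static const char *enqueue(enum tx_state *st)
{
        switch (*st) {
        case TX_AWAKE:
                return "send now, re-arm idle timer";
        case TX_ASLEEP:
                *st = TX_WAKING;        /* start the WAKE_IND handshake */
                return "queue, start waking the device";
        case TX_WAKING:
                return "queue until WAKE_ACK arrives";
        }
        return "unreachable";
}

int main(void)
{
        enum tx_state st = TX_ASLEEP;

        printf("pkt 1: %s\n", enqueue(&st));    /* asleep -> waking */
        printf("pkt 2: %s\n", enqueue(&st));    /* still waking */
        st = TX_AWAKE;                          /* pretend WAKE_ACK arrived */
        printf("pkt 3: %s\n", enqueue(&st));
        return 0;
}
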
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
new file mode 100644 (file)
index 0000000..6b9b912
--- /dev/null
@@ -0,0 +1,969 @@
+/*
+ *  Bluetooth Software UART Qualcomm protocol
+ *
+ *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ *  protocol extension to H4.
+ *
+ *  Copyright (C) 2007 Texas Instruments, Inc.
+ *  Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ *
+ *  Acknowledgements:
+ *  This file is based on hci_ll.c, which was written by
+ *  Ohad Ben-Cohen <ohad@bencohen.org> and was in turn based
+ *  on hci_h4.c, written by Maxim Krasnyansky and
+ *  Marcel Holtmann.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btqca.h"
+
+/* HCI_IBS protocol messages */
+#define HCI_IBS_SLEEP_IND      0xFE
+#define HCI_IBS_WAKE_IND       0xFD
+#define HCI_IBS_WAKE_ACK       0xFC
+#define HCI_MAX_IBS_SIZE       10
+
+/* Controller states */
+#define STATE_IN_BAND_SLEEP_ENABLED    1
+
+#define IBS_WAKE_RETRANS_TIMEOUT_MS    100
+#define IBS_TX_IDLE_TIMEOUT_MS                 2000
+#define BAUDRATE_SETTLE_TIMEOUT_MS     300
+
+/* HCI_IBS transmit side sleep protocol states */
+enum tx_ibs_states {
+       HCI_IBS_TX_ASLEEP,
+       HCI_IBS_TX_WAKING,
+       HCI_IBS_TX_AWAKE,
+};
+
+/* HCI_IBS receive side sleep protocol states */
+enum rx_states {
+       HCI_IBS_RX_ASLEEP,
+       HCI_IBS_RX_AWAKE,
+};
+
+/* HCI_IBS transmit and receive side clock state vote */
+enum hci_ibs_clock_state_vote {
+       HCI_IBS_VOTE_STATS_UPDATE,
+       HCI_IBS_TX_VOTE_CLOCK_ON,
+       HCI_IBS_TX_VOTE_CLOCK_OFF,
+       HCI_IBS_RX_VOTE_CLOCK_ON,
+       HCI_IBS_RX_VOTE_CLOCK_OFF,
+};
+
+struct qca_data {
+       struct hci_uart *hu;
+       struct sk_buff *rx_skb;
+       struct sk_buff_head txq;
+       struct sk_buff_head tx_wait_q;  /* HCI_IBS wait queue   */
+       spinlock_t hci_ibs_lock;        /* HCI_IBS state lock   */
+       u8 tx_ibs_state;        /* HCI_IBS transmit side power state */
+       u8 rx_ibs_state;        /* HCI_IBS receive side power state */
+       u32 tx_vote;            /* Clock must be on for TX */
+       u32 rx_vote;            /* Clock must be on for RX */
+       struct timer_list tx_idle_timer;
+       u32 tx_idle_delay;
+       struct timer_list wake_retrans_timer;
+       u32 wake_retrans;
+       struct workqueue_struct *workqueue;
+       struct work_struct ws_awake_rx;
+       struct work_struct ws_awake_device;
+       struct work_struct ws_rx_vote_off;
+       struct work_struct ws_tx_vote_off;
+       unsigned long flags;
+
+       /* For debugging purpose */
+       u64 ibs_sent_wacks;
+       u64 ibs_sent_slps;
+       u64 ibs_sent_wakes;
+       u64 ibs_recv_wacks;
+       u64 ibs_recv_slps;
+       u64 ibs_recv_wakes;
+       u64 vote_last_jif;
+       u32 vote_on_ms;
+       u32 vote_off_ms;
+       u64 tx_votes_on;
+       u64 rx_votes_on;
+       u64 tx_votes_off;
+       u64 rx_votes_off;
+       u64 votes_on;
+       u64 votes_off;
+};
+
+static void __serial_clock_on(struct tty_struct *tty)
+{
+       /* TODO: Some chipsets require the UART clock to be enabled on the
+        * client side to save power, or need manual intervention.
+        * Put code to turn the UART clock on here if needed.
+        */
+}
+
+static void __serial_clock_off(struct tty_struct *tty)
+{
+       /* TODO: Some chipsets require the UART clock to be disabled on the
+        * client side to save power, or need manual intervention.
+        * Put code to turn the UART clock off here if needed.
+        */
+}
+
+/* serial_clock_vote needs to be called with the ibs lock held */
+static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+       unsigned int diff;
+
+       bool old_vote = (qca->tx_vote | qca->rx_vote);
+       bool new_vote;
+
+       switch (vote) {
+       case HCI_IBS_VOTE_STATS_UPDATE:
+               diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+               if (old_vote)
+                       qca->vote_off_ms += diff;
+               else
+                       qca->vote_on_ms += diff;
+               return;
+
+       case HCI_IBS_TX_VOTE_CLOCK_ON:
+               qca->tx_vote = true;
+               qca->tx_votes_on++;
+               new_vote = true;
+               break;
+
+       case HCI_IBS_RX_VOTE_CLOCK_ON:
+               qca->rx_vote = true;
+               qca->rx_votes_on++;
+               new_vote = true;
+               break;
+
+       case HCI_IBS_TX_VOTE_CLOCK_OFF:
+               qca->tx_vote = false;
+               qca->tx_votes_off++;
+               new_vote = qca->rx_vote | qca->tx_vote;
+               break;
+
+       case HCI_IBS_RX_VOTE_CLOCK_OFF:
+               qca->rx_vote = false;
+               qca->rx_votes_off++;
+               new_vote = qca->rx_vote | qca->tx_vote;
+               break;
+
+       default:
+               BT_ERR("Voting irregularity");
+               return;
+       }
+
+       if (new_vote != old_vote) {
+               if (new_vote)
+                       __serial_clock_on(hu->tty);
+               else
+                       __serial_clock_off(hu->tty);
+
+               BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
+                      vote ? "true" : "false");
+
+               diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+               if (new_vote) {
+                       qca->votes_on++;
+                       qca->vote_off_ms += diff;
+               } else {
+                       qca->votes_off++;
+                       qca->vote_on_ms += diff;
+               }
+               qca->vote_last_jif = jiffies;
+       }
+}
+
+/* Builds and sends an HCI_IBS command packet.
+ * These are very simple packets with only 1 cmd byte.
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+       int err = 0;
+       struct sk_buff *skb = NULL;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
+
+       skb = bt_skb_alloc(1, GFP_ATOMIC);
+       if (!skb) {
+               BT_ERR("Failed to allocate memory for HCI_IBS packet");
+               return -ENOMEM;
+       }
+
+       /* Assign HCI_IBS type */
+       *skb_put(skb, 1) = cmd;
+
+       skb_queue_tail(&qca->txq, skb);
+
+       return err;
+}
+
+static void qca_wq_awake_device(struct work_struct *work)
+{
+       struct qca_data *qca = container_of(work, struct qca_data,
+                                           ws_awake_device);
+       struct hci_uart *hu = qca->hu;
+       unsigned long retrans_delay;
+
+       BT_DBG("hu %p wq awake device", hu);
+
+       /* Vote for serial clock */
+       serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
+
+       spin_lock(&qca->hci_ibs_lock);
+
+       /* Send wake indication to device */
+       if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
+               BT_ERR("Failed to send WAKE to device");
+
+       qca->ibs_sent_wakes++;
+
+       /* Start retransmit timer */
+       retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+       mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+
+       spin_unlock(&qca->hci_ibs_lock);
+
+       /* Actually send the packets */
+       hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_awake_rx(struct work_struct *work)
+{
+       struct qca_data *qca = container_of(work, struct qca_data,
+                                           ws_awake_rx);
+       struct hci_uart *hu = qca->hu;
+
+       BT_DBG("hu %p wq awake rx", hu);
+
+       serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+
+       spin_lock(&qca->hci_ibs_lock);
+       qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
+
+       /* Always acknowledge device wake up,
+        * sending IBS message doesn't count as TX ON.
+        */
+       if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
+               BT_ERR("Failed to acknowledge device wake up");
+
+       qca->ibs_sent_wacks++;
+
+       spin_unlock(&qca->hci_ibs_lock);
+
+       /* Actually send the packets */
+       hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
+{
+       struct qca_data *qca = container_of(work, struct qca_data,
+                                           ws_rx_vote_off);
+       struct hci_uart *hu = qca->hu;
+
+       BT_DBG("hu %p rx clock vote off", hu);
+
+       serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+}
+
+static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
+{
+       struct qca_data *qca = container_of(work, struct qca_data,
+                                           ws_tx_vote_off);
+       struct hci_uart *hu = qca->hu;
+
+       BT_DBG("hu %p tx clock vote off", hu);
+
+       /* Run HCI tx handling unlocked */
+       hci_uart_tx_wakeup(hu);
+
+       /* Now that message queued to tty driver, vote for tty clocks off.
+        * It is up to the tty driver to pend the clocks off until tx done.
+        */
+       serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+       struct hci_uart *hu = (struct hci_uart *)arg;
+       struct qca_data *qca = hu->priv;
+       unsigned long flags;
+
+       BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+
+       spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+                                flags, SINGLE_DEPTH_NESTING);
+
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_AWAKE:
+               /* TX_IDLE, go to SLEEP */
+               if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+                       BT_ERR("Failed to send SLEEP to device");
+                       break;
+               }
+               qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+               qca->ibs_sent_slps++;
+               queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+       case HCI_IBS_TX_WAKING:
+               /* Fall through */
+
+       default:
+               BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+       struct hci_uart *hu = (struct hci_uart *)arg;
+       struct qca_data *qca = hu->priv;
+       unsigned long flags, retrans_delay;
+       unsigned long retransmit = 0;
+
+       BT_DBG("hu %p wake retransmit timeout in %d state",
+               hu, qca->tx_ibs_state);
+
+       spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+                                flags, SINGLE_DEPTH_NESTING);
+
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_WAKING:
+               /* No WAKE_ACK, retransmit WAKE */
+               retransmit = 1;
+               if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+                       BT_ERR("Failed to acknowledge device wake up");
+                       break;
+               }
+               qca->ibs_sent_wakes++;
+               retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+               mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+       case HCI_IBS_TX_AWAKE:
+               /* Fall through */
+
+       default:
+               BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       if (retransmit)
+               hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int qca_open(struct hci_uart *hu)
+{
+       struct qca_data *qca;
+
+       BT_DBG("hu %p qca_open", hu);
+
+       qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+       if (!qca)
+               return -ENOMEM;
+
+       skb_queue_head_init(&qca->txq);
+       skb_queue_head_init(&qca->tx_wait_q);
+       spin_lock_init(&qca->hci_ibs_lock);
+       qca->workqueue = create_singlethread_workqueue("qca_wq");
+       if (!qca->workqueue) {
+               BT_ERR("QCA Workqueue not initialized properly");
+               kfree(qca);
+               return -ENOMEM;
+       }
+
+       INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
+       INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
+       INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+       INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+
+       qca->hu = hu;
+
+       /* Assume we start with both sides asleep -- extra wakes OK */
+       qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+       qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+
+       /* clocks actually on, but we start votes off */
+       qca->tx_vote = false;
+       qca->rx_vote = false;
+       qca->flags = 0;
+
+       qca->ibs_sent_wacks = 0;
+       qca->ibs_sent_slps = 0;
+       qca->ibs_sent_wakes = 0;
+       qca->ibs_recv_wacks = 0;
+       qca->ibs_recv_slps = 0;
+       qca->ibs_recv_wakes = 0;
+       qca->vote_last_jif = jiffies;
+       qca->vote_on_ms = 0;
+       qca->vote_off_ms = 0;
+       qca->votes_on = 0;
+       qca->votes_off = 0;
+       qca->tx_votes_on = 0;
+       qca->tx_votes_off = 0;
+       qca->rx_votes_on = 0;
+       qca->rx_votes_off = 0;
+
+       hu->priv = qca;
+
+       init_timer(&qca->wake_retrans_timer);
+       qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+       qca->wake_retrans_timer.data = (u_long)hu;
+       qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+       init_timer(&qca->tx_idle_timer);
+       qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+       qca->tx_idle_timer.data = (u_long)hu;
+       qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
+       BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
+              qca->tx_idle_delay, qca->wake_retrans);
+
+       return 0;
+}
+
+static void qca_debugfs_init(struct hci_dev *hdev)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct qca_data *qca = hu->priv;
+       struct dentry *ibs_dir;
+       umode_t mode;
+
+       if (!hdev->debugfs)
+               return;
+
+       ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+       /* read only */
+       mode = S_IRUGO;
+       debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
+       debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+       debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
+                          &qca->ibs_sent_slps);
+       debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
+                          &qca->ibs_sent_wakes);
+       debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
+                          &qca->ibs_sent_wacks);
+       debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
+                          &qca->ibs_recv_slps);
+       debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
+                          &qca->ibs_recv_wakes);
+       debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
+                          &qca->ibs_recv_wacks);
+       debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
+       debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
+       debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
+       debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
+       debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
+       debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
+       debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
+       debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
+       debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
+       debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+
+       /* read/write */
+       mode = S_IRUGO | S_IWUSR;
+       debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+       debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
+                          &qca->tx_idle_delay);
+}
+
+/* Flush protocol data */
+static int qca_flush(struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p qca flush", hu);
+
+       skb_queue_purge(&qca->tx_wait_q);
+       skb_queue_purge(&qca->txq);
+
+       return 0;
+}
+
+/* Close protocol */
+static int qca_close(struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p qca close", hu);
+
+       serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+
+       skb_queue_purge(&qca->tx_wait_q);
+       skb_queue_purge(&qca->txq);
+       del_timer(&qca->tx_idle_timer);
+       del_timer(&qca->wake_retrans_timer);
+       destroy_workqueue(qca->workqueue);
+       qca->hu = NULL;
+
+       kfree_skb(qca->rx_skb);
+
+       hu->priv = NULL;
+
+       kfree(qca);
+
+       return 0;
+}
+
+/* Called upon a wake-up-indication from the device.
+ */
+static void device_want_to_wakeup(struct hci_uart *hu)
+{
+       unsigned long flags;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p want to wake up", hu);
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       qca->ibs_recv_wakes++;
+
+       switch (qca->rx_ibs_state) {
+       case HCI_IBS_RX_ASLEEP:
+               /* Make sure the clock is on - we may have turned it off since
+                * receiving the wake-up indicator, so wake the rx clock here.
+                */
+               queue_work(qca->workqueue, &qca->ws_awake_rx);
+               spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+               return;
+
+       case HCI_IBS_RX_AWAKE:
+               /* Always acknowledge device wake up,
+                * sending IBS message doesn't count as TX ON.
+                */
+               if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+                       BT_ERR("Failed to acknowledge device wake up");
+                       break;
+               }
+               qca->ibs_sent_wacks++;
+               break;
+
+       default:
+               /* Any other state is illegal */
+               BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
+                      qca->rx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       /* Actually send the packets */
+       hci_uart_tx_wakeup(hu);
+}
+
+/* Called upon a sleep-indication from the device.
+ */
+static void device_want_to_sleep(struct hci_uart *hu)
+{
+       unsigned long flags;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p want to sleep", hu);
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       qca->ibs_recv_slps++;
+
+       switch (qca->rx_ibs_state) {
+       case HCI_IBS_RX_AWAKE:
+               /* Update state */
+               qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+               /* Vote off rx clock under workqueue */
+               queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+               break;
+
+       case HCI_IBS_RX_ASLEEP:
+               /* Fall through */
+
+       default:
+               /* Any other state is illegal */
+               BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
+                      qca->rx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+/* Called upon wake-up-acknowledgement from the device
+ */
+static void device_woke_up(struct hci_uart *hu)
+{
+       unsigned long flags, idle_delay;
+       struct qca_data *qca = hu->priv;
+       struct sk_buff *skb = NULL;
+
+       BT_DBG("hu %p woke up", hu);
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       qca->ibs_recv_wacks++;
+
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_AWAKE:
+               /* Expect one if we send 2 WAKEs */
+               BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
+                      qca->tx_ibs_state);
+               break;
+
+       case HCI_IBS_TX_WAKING:
+               /* Send pending packets */
+               while ((skb = skb_dequeue(&qca->tx_wait_q)))
+                       skb_queue_tail(&qca->txq, skb);
+
+               /* Switch timers and change state to HCI_IBS_TX_AWAKE */
+               del_timer(&qca->wake_retrans_timer);
+               idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+               mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+               qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+               /* Fall through */
+
+       default:
+               BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
+                      qca->tx_ibs_state);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       /* Actually send the packets */
+       hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue a frame for transmission (padding, crc, etc); may be called
+ * from two simultaneous tasklets.
+ */
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+       unsigned long flags = 0, idle_delay;
+       struct qca_data *qca = hu->priv;
+
+       BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
+              qca->tx_ibs_state);
+
+       /* Prepend skb with frame type */
+       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+       /* Don't go to sleep in the middle of a patch download, or when
+        * Out-Of-Band (GPIO-controlled) sleep is selected.
+        */
+       if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+               skb_queue_tail(&qca->txq, skb);
+               return 0;
+       }
+
+       spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+       /* Act according to current state */
+       switch (qca->tx_ibs_state) {
+       case HCI_IBS_TX_AWAKE:
+               BT_DBG("Device awake, sending normally");
+               skb_queue_tail(&qca->txq, skb);
+               idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+               mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+               break;
+
+       case HCI_IBS_TX_ASLEEP:
+               BT_DBG("Device asleep, waking up and queueing packet");
+               /* Save packet for later */
+               skb_queue_tail(&qca->tx_wait_q, skb);
+
+               qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+               /* Schedule work to wake up the device */
+               queue_work(qca->workqueue, &qca->ws_awake_device);
+               break;
+
+       case HCI_IBS_TX_WAKING:
+               BT_DBG("Device waking up, queueing packet");
+               /* Transient state; just keep packet for later */
+               skb_queue_tail(&qca->tx_wait_q, skb);
+               break;
+
+       default:
+               BT_ERR("Illegal tx state: %d (losing packet)",
+                      qca->tx_ibs_state);
+               kfree_skb(skb);
+               break;
+       }
+
+       spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+       return 0;
+}
+
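+/* The three handlers below are called from the H4 receive path for the
+ * single-byte IBS control messages; each dispatches to the IBS state
+ * machine and frees the skb.
+ */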
+static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
+
+       device_want_to_sleep(hu);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
+
+       device_want_to_wakeup(hu);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+
+       BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
+
+       device_woke_up(hu);
+
+       kfree_skb(skb);
+       return 0;
+}
+
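+/* IBS control messages are bare single-byte packets: no header and no
+ * length field, hence the zero hlen/loff/lsize in the descriptors below.
+ */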
+#define QCA_IBS_SLEEP_IND_EVENT \
+       .type = HCI_IBS_SLEEP_IND, \
+       .hlen = 0, \
+       .loff = 0, \
+       .lsize = 0, \
+       .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_IND_EVENT \
+       .type = HCI_IBS_WAKE_IND, \
+       .hlen = 0, \
+       .loff = 0, \
+       .lsize = 0, \
+       .maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_ACK_EVENT \
+       .type = HCI_IBS_WAKE_ACK, \
+       .hlen = 0, \
+       .loff = 0, \
+       .lsize = 0, \
+       .maxlen = HCI_MAX_IBS_SIZE
+
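+/* Standard H4 ACL/SCO/event packets are handed to the HCI core; the
+ * vendor IBS bytes are consumed locally and never reach the core.
+ */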
+static const struct h4_recv_pkt qca_recv_pkts[] = {
+       { H4_RECV_ACL,             .recv = hci_recv_frame    },
+       { H4_RECV_SCO,             .recv = hci_recv_frame    },
+       { H4_RECV_EVENT,           .recv = hci_recv_frame    },
+       { QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
+       { QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
+       { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
+};
+
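+/* Receive path: reassemble H4 frames from the raw UART byte stream */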
+static int qca_recv(struct hci_uart *hu, const void *data, int count)
+{
+       struct qca_data *qca = hu->priv;
+
+       if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+               return -EUNATCH;
+
+       qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+                                 qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
+       if (IS_ERR(qca->rx_skb)) {
+               int err = PTR_ERR(qca->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               qca->rx_skb = NULL;
+               return err;
+       }
+
+       return count;
+}
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+       struct qca_data *qca = hu->priv;
+
+       return skb_dequeue(&qca->txq);
+}
+
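+/* Map a host UART speed in bps to the controller's baudrate code */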
+static uint8_t qca_get_baudrate_value(int speed)
+{
+       switch (speed) {
+       case 9600:
+               return QCA_BAUDRATE_9600;
+       case 19200:
+               return QCA_BAUDRATE_19200;
+       case 38400:
+               return QCA_BAUDRATE_38400;
+       case 57600:
+               return QCA_BAUDRATE_57600;
+       case 115200:
+               return QCA_BAUDRATE_115200;
+       case 230400:
+               return QCA_BAUDRATE_230400;
+       case 460800:
+               return QCA_BAUDRATE_460800;
+       case 500000:
+               return QCA_BAUDRATE_500000;
+       case 921600:
+               return QCA_BAUDRATE_921600;
+       case 1000000:
+               return QCA_BAUDRATE_1000000;
+       case 2000000:
+               return QCA_BAUDRATE_2000000;
+       case 3000000:
+               return QCA_BAUDRATE_3000000;
+       case 3500000:
+               return QCA_BAUDRATE_3500000;
+       default:
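+               /* Unsupported speeds fall back to 115200 */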
+               return QCA_BAUDRATE_115200;
+       }
+}
+
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct qca_data *qca = hu->priv;
+       struct sk_buff *skb;
+       u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
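+       /* Vendor-specific change-baudrate command; cmd[4] is patched
+        * below with the requested baudrate code.
+        */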
+
+       if (baudrate > QCA_BAUDRATE_3000000)
+               return -EINVAL;
+
+       cmd[4] = baudrate;
+
+       skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+       if (!skb) {
+               BT_ERR("Failed to allocate memory for baudrate packet");
+               return -ENOMEM;
+       }
+
+       /* Copy in the baudrate change command and tag the skb as an
+        * HCI command packet.
+        */
+       memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+       bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+       skb_queue_tail(&qca->txq, skb);
+       hci_uart_tx_wakeup(hu);
+
+       /* Wait 300 ms for the controller to switch to the new baudrate:
+        * once it has processed this HCI command, the host can talk to
+        * it at the new rate.
+        */
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+       set_current_state(TASK_INTERRUPTIBLE);
+
+       return 0;
+}
+
+static int qca_setup(struct hci_uart *hu)
+{
+       struct hci_dev *hdev = hu->hdev;
+       struct qca_data *qca = hu->priv;
+       unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+       int ret;
+
+       BT_INFO("%s: ROME setup", hdev->name);
+
+       /* Patch downloading has to be done without IBS mode */
+       clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+       /* Setup initial baudrate */
+       speed = 0;
+       if (hu->init_speed)
+               speed = hu->init_speed;
+       else if (hu->proto->init_speed)
+               speed = hu->proto->init_speed;
+
+       if (speed)
+               hci_uart_set_baudrate(hu, speed);
+
+       /* Setup user speed if needed */
+       speed = 0;
+       if (hu->oper_speed)
+               speed = hu->oper_speed;
+       else if (hu->proto->oper_speed)
+               speed = hu->proto->oper_speed;
+
+       if (speed) {
+               qca_baudrate = qca_get_baudrate_value(speed);
+
+               BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+               ret = qca_set_baudrate(hdev, qca_baudrate);
+               if (ret) {
+                       BT_ERR("%s: Failed to change the baud rate (%d)",
+                              hdev->name, ret);
+                       return ret;
+               }
+               hci_uart_set_baudrate(hu, speed);
+       }
+
+       /* Setup patch / NVM configurations */
+       ret = qca_uart_setup_rome(hdev, qca_baudrate);
+       if (!ret) {
+               set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+               qca_debugfs_init(hdev);
+       }
+
+       /* Setup bdaddr */
+       hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+       return ret;
+}
+
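+/* Protocol ops registered with the hci_uart line discipline core */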
+static struct hci_uart_proto qca_proto = {
+       .id             = HCI_UART_QCA,
+       .name           = "QCA",
+       .init_speed     = 115200,
+       .oper_speed     = 3000000,
+       .open           = qca_open,
+       .close          = qca_close,
+       .flush          = qca_flush,
+       .setup          = qca_setup,
+       .recv           = qca_recv,
+       .enqueue        = qca_enqueue,
+       .dequeue        = qca_dequeue,
+};
+
+int __init qca_init(void)
+{
+       return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+       return hci_uart_unregister_proto(&qca_proto);
+}
index 496587a73a9daa4a2a70ef92bd9fc04b0ef72dbf..495b9ef52bb0fd608519fda39c0b4726c95f3c7f 100644 (file)
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS                _IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO     8
+#define HCI_UART_MAX_PROTO     9
 
 #define HCI_UART_H4    0
 #define HCI_UART_BCSP  1
@@ -45,6 +45,7 @@
 #define HCI_UART_ATH3K 5
 #define HCI_UART_INTEL 6
 #define HCI_UART_BCM   7
+#define HCI_UART_QCA   8
 
 #define HCI_UART_RAW_DEVICE    0
 #define HCI_UART_RESET_ON_INIT 1
@@ -176,3 +177,8 @@ int intel_deinit(void);
 int bcm_init(void);
 int bcm_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
index da8faf78536a3ae01827a2ee9480c486a04297a5..5643b65cee204d950d842529e0a12123f57e92c0 100644 (file)
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
        hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-       if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+       if (IS_ERR(hwrng_fill)) {
                pr_err("hwrng_fill thread creation failed");
                hwrng_fill = NULL;
        }
index 4b93a1efb36d11fa7171735d29bac283e4bb6d97..ac03ba49e9d1952dff14e9383ed86874690a7176 100644 (file)
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
index b8ff3c64cc452a16fc4108426fb6e5b1c54e91e8..c96de14036a0adebfc7628dc9f9cd5413b5c5495 100644 (file)
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
        sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
index ae5b2bd3a9785c63646e3e922fbe17330678b481..fa3dd840a83771735e474a658a5c6516c62f76a0 100644 (file)
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                ret = exynos5250_cpufreq_init(exynos_info);
        } else {
                pr_err("%s: Unknown SoC type\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
        }
 
        if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
        if (exynos_info->set_freq == NULL) {
                dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
        arm_regulator = regulator_get(NULL, "vdd_arm");
        if (IS_ERR(arm_regulator)) {
                dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
@@ -225,7 +227,7 @@ err_cpufreq_reg:
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
-       return -EINVAL;
+       return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
index e362860c2b50c49ad5289169e68b8baa2a90197c..cd593c1f66dc8af8a6208933003783e0f37b7392 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/clock.h>
 #include <asm/idle.h>
 
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
 
 static uint nowait;
 
index dae1e8099969a192b302703ec291da96ebac3429..f9c78751989ec865491570ed13bf19dbc6b1a799 100644 (file)
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
                          state->buflen_1;
        u32 *sh_desc = ctx->sh_desc_fin, *desc;
        dma_addr_t ptr = ctx->sh_desc_fin_dma;
-       int sec4_sg_bytes;
+       int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret = 0;
        int sh_len;
 
-       sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+       sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+       sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
-       (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
index 7ba495f7537042f898ef1cd7cdba7a6263f2059b..402631a19a112770af83f0f4228176703e1c0b44 100644 (file)
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
-               BUG_ON(req->dst->length < nbytes);
                req_ctx->dst = NULL;
                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                        flags, DMA_FROM_DEVICE))
index 08f8d5cd633491e3ff0e28ca8204d7f51be2b05b..becb738c897b1b5d93b632e3ab80ed2b146ead5a 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - to_process;
-               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len,
                                                 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA256_BLOCK_SIZE-aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by the sg list limits and the number of sgs already
+                * used for leftover data (see above).
+                * In the ideal case we could allow NX_PAGE_SIZE *
+                * max_sg_len, but because the data may not be aligned, we
+                * need to account for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index aff0fe58eac0b7aba11b465a192c280ef19fdbac..b6e183d58d73d5a4e38fff2925344783e8e581bc 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - leftover;
-               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-               leftover = total - to_process;
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);
 
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA512_BLOCK_SIZE-aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by the sg list limits and the number of sgs already
+                * used for leftover data (see above).
+                * In the ideal case we could allow NX_PAGE_SIZE *
+                * max_sg_len, but because the data may not be aligned, we
+                * need to account for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                        goto out;
                }
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index 067402c7c2a93fdc02ca3242f918deba460dae91..df427c0e9e7b2c99c8ee6cbe0c91b98c1ff47c43 100644 (file)
@@ -73,7 +73,8 @@
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)
 
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
 
 struct qat_alg_buf {
        uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-       if (atomic_add_return(1, &active_dev) == 1) {
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
                int i;
 
                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-               return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+               ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        }
-       return 0;
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_unregister(void)
 {
-       if (atomic_sub_return(1, &active_dev) == 0)
-               return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-       return 0;
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_init(void)
 {
-       atomic_set(&active_dev, 0);
        crypto_get_default_rng();
        return 0;
 }
index 59892126d1758f9d0e0821fa88ff62094a0c6e74..d3629b7482dda55858a942d084916396f6f473c0 100644 (file)
@@ -48,6 +48,8 @@
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 
+#define ATC_MAX_DSCR_TRIALS    10
+
 /*
  * Initial number of descriptors to allocate for each channel. This could
  * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
  *
  * @current_len: the number of bytes left before reading CTRLA
  * @ctrla: the value of CTRLA
- * @desc: the descriptor containing the transfer width
  */
-static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
-                                       struct at_desc *desc)
+static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
 {
-       return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
-}
+       u32 btsize = (ctrla & ATC_BTSIZE_MAX);
+       u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
 
-/**
- * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
- * to the current value of CTRLA.
- *
- * @current_len: the number of bytes left before reading CTRLA
- * @atchan: the channel to read CTRLA for
- * @desc: the descriptor containing the transfer width
- */
-static inline int atc_calc_bytes_left_from_reg(int current_len,
-                       struct at_dma_chan *atchan, struct at_desc *desc)
-{
-       u32 ctrla = channel_readl(atchan, CTRLA);
-
-       return atc_calc_bytes_left(current_len, ctrla, desc);
+       /*
+        * According to the datasheet, when reading the Control A Register
+        * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
+        * number of transfers completed on the Source Interface.
+        * So btsize is always a number of source width transfers.
+        */
+       return current_len - (btsize << src_width);
 }
 
 /**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
        struct at_desc *desc_first = atc_first_active(atchan);
        struct at_desc *desc;
        int ret;
-       u32 ctrla, dscr;
+       u32 ctrla, dscr, trials;
 
        /*
         * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                 * the channel's DSCR register and compare it against the value
                 * of the hardware linked list structure of each child
                 * descriptor.
+                *
+                * The CTRLA register provides us with the amount of data
+                * already read from the source for the current child
+                * descriptor. So we can compute a more accurate residue by also
+                * removing the number of bytes corresponding to this amount of
+                * data.
+                *
+                * However, the DSCR and CTRLA registers cannot both be read
+                * atomically. Hence a race condition may occur: the first read
+                * register may refer to one child descriptor whereas the second
+                * read may refer to a later child descriptor in the list
+                * because of the DMA transfer progression in between the two
+                * reads.
+                *
+                * One solution could have been to pause the DMA transfer, read
+                * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
+                * this approach presents some drawbacks:
+                * - If the DMA transfer is paused, RX overruns or TX underruns
+                *   are more likely to occur, depending on the system latency.
+                *   Taking the USART driver as an example, it uses a cyclic DMA
+                *   transfer to read data from the Receive Holding Register
+                *   (RHR) to avoid RX overruns since the RHR is not protected
+                *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
+                *   to compute the residue would break the USART driver design.
+                * - The atc_pause() function masks interrupts, but we'd rather
+                *   avoid doing so for system latency reasons.
+                *
+                * Instead we use another approach: the DSCR is read a first
+                * time, then the CTRLA, then the DSCR a second time. If the
+                * two consecutive DSCR reads return the same value, we assume
+                * that they, and the CTRLA value read in between, all refer
+                * to the very same child descriptor. For cyclic transfers,
+                * the assumption is that a full loop is "not so fast".
+                * If the two DSCR values differ, we read the CTRLA and then
+                * the DSCR again, until two consecutive DSCR reads are equal
+                * or the maximum number of trials is reached.
+                * This algorithm is very unlikely not to find a stable DSCR
+                * value.
                 */
 
-               ctrla = channel_readl(atchan, CTRLA);
-               rmb(); /* ensure CTRLA is read before DSCR */
                dscr = channel_readl(atchan, DSCR);
+               rmb(); /* ensure DSCR is read before CTRLA */
+               ctrla = channel_readl(atchan, CTRLA);
+               for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+                       u32 new_dscr;
+
+                       rmb(); /* ensure DSCR is read after CTRLA */
+                       new_dscr = channel_readl(atchan, DSCR);
+
+                       /*
+                        * If the DSCR register value has not changed inside the
+                        * DMA controller since the previous read, we assume
+                        * that both the dscr and ctrla values refer to the
+                        * very same descriptor.
+                        */
+                       if (likely(new_dscr == dscr))
+                               break;
+
+                       /*
+                        * DSCR has changed inside the DMA controller, so the
+                        * previously read value of CTRLA may refer to an already
+                        * processed descriptor and hence be outdated.
+                        * We need to update ctrla to match the current
+                        * descriptor.
+                        */
+                       dscr = new_dscr;
+                       rmb(); /* ensure DSCR is read before CTRLA */
+                       ctrla = channel_readl(atchan, CTRLA);
+               }
+               if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+                       return -ETIMEDOUT;
 
                /* for the first descriptor we can be more accurate */
                if (desc_first->lli.dscr == dscr)
-                       return atc_calc_bytes_left(ret, ctrla, desc_first);
+                       return atc_calc_bytes_left(ret, ctrla);
 
                ret -= desc_first->len;
                list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                }
 
                /*
-                * For the last descriptor in the chain we can calculate
+                * For the current descriptor in the chain we can calculate
                 * the remaining bytes using the channel's register.
-                * Note that the transfer width of the first and last
-                * descriptor may differ.
                 */
-               if (!desc->lli.dscr)
-                       ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
+               ret = atc_calc_bytes_left(ret, ctrla);
        } else {
                /* single transfer */
-               ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
+               ctrla = channel_readl(atchan, CTRLA);
+               ret = atc_calc_bytes_left(ret, ctrla);
        }
 
        return ret;
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 
        desc->txd.cookie = -EBUSY;
        desc->total_len = desc->len = len;
-       desc->tx_width = dwidth;
 
        /* set end-of-link to the last link descriptor of list*/
        set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        first->txd.cookie = -EBUSY;
        first->total_len = len;
 
-       /* set transfer width for the calculation of the residue */
-       first->tx_width = src_width;
-       prev->tx_width = src_width;
-
        /* set end-of-link to the last link descriptor of list*/
        set_desc_eol(desc);
 
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        first->txd.cookie = -EBUSY;
        first->total_len = total_len;
 
-       /* set transfer width for the calculation of the residue */
-       first->tx_width = reg_width;
-       prev->tx_width = reg_width;
-
        /* first link descriptor of list is responsible of flags */
        first->txd.flags = flags; /* client is in control of this ack */
 
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
                desc->txd.cookie = 0;
                desc->len = len;
 
-               /*
-                * Although we only need the transfer width for the first and
-                * the last descriptor, its easier to set it to all descriptors.
-                */
-               desc->tx_width = src_width;
-
                atc_desc_chain(&first, &prev, desc);
 
                /* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        /* First descriptor of the chain embedds additional information */
        first->txd.cookie = -EBUSY;
        first->total_len = buf_len;
-       first->tx_width = reg_width;
 
        return &first->txd;
 
index bc8d5ebedd192f12f9ed32f220c45892c064a8d0..7f5a08230f76de64e044964f8ff49b5051a9bbb2 100644 (file)
 #define                ATC_SRC_WIDTH_BYTE      (0x0 << 24)
 #define                ATC_SRC_WIDTH_HALFWORD  (0x1 << 24)
 #define                ATC_SRC_WIDTH_WORD      (0x2 << 24)
+#define                ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
 #define        ATC_DST_WIDTH_MASK      (0x3 << 28)     /* Destination Single Transfer Size */
 #define                ATC_DST_WIDTH(x)        ((x) << 28)
 #define                ATC_DST_WIDTH_BYTE      (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
  * @txd: support for the async_tx api
  * @desc_node: node on the channed descriptors list
  * @len: descriptor byte count
- * @tx_width: transfer width
  * @total_len: total transaction byte count
  */
 struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
        struct dma_async_tx_descriptor  txd;
        struct list_head                desc_node;
        size_t                          len;
-       u32                             tx_width;
        size_t                          total_len;
 
        /* Interleaved data */
index cf1213de7865ecb95e20c9d273a57ec1981203a7..40afa2a16cfc00f17d696833e75d250dabda0e6c 100644 (file)
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
         * descriptor view 2 since some fields of the configuration register
         * depend on transfer size and src/dest addresses.
         */
-       if (at_xdmac_chan_is_cyclic(atchan)) {
+       if (at_xdmac_chan_is_cyclic(atchan))
                reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
-               at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
-       } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
+       else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
                reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
-       } else {
-               /*
-                * No need to write AT_XDMAC_CC reg, it will be done when the
-                * descriptor is fecthed.
-                */
+       else
                reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
-       }
+       /*
+        * Even if the register will be updated from the configuration in the
+        * descriptor when using view 2 or higher, the PROT bit won't be set
+        * properly. This bit can be modified only by using the channel
+        * configuration register.
+        */
+       at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
 
        reg |= AT_XDMAC_CNDC_NDDUP
               | AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        desc->lld.mbr_sa = mem;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
-               desc->lld.mbr_cfg = atchan->cfg;
-               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+               dwidth = at_xdmac_get_dwidth(atchan->cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
-                              ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+                              ? dwidth
                               : AT_XDMAC_CC_DWIDTH_BYTE;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2                       /* next descriptor view */
                        | AT_XDMAC_MBR_UBC_NDEN                                 /* next descriptor dst parameter update */
                        | AT_XDMAC_MBR_UBC_NSEN                                 /* next descriptor src parameter update */
                        | (len >> fixed_dwidth);                                /* microblock length */
+               desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+                                   AT_XDMAC_CC_DWIDTH(fixed_dwidth);
                dev_dbg(chan2dev(chan),
                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
index 4a4cce15f25dd65c6a720d949ed0d9c922ff1cba..3ff284c8e3d5aef72f229017c883c73cbe13403f 100644 (file)
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
        if (IS_ERR(ch))
                return NULL;
+
+       dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+       ch->device->privatecnt++;
+
        return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
index fbaf1ead25971542ca81acd9f7ddc14161acf340..f1325f62563e2cdf957f33706d620570202d7faf 100644 (file)
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
        config &= ~0x7;
        config |= op_mode;
 
-       if (IS_ENABLED(__BIG_ENDIAN))
-               config |= XOR_DESCRIPTOR_SWAP;
-       else
-               config &= ~XOR_DESCRIPTOR_SWAP;
+#if defined(__BIG_ENDIAN)
+       config |= XOR_DESCRIPTOR_SWAP;
+#else
+       config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
 
        writel_relaxed(config, XOR_CONFIG(chan));
        chan->current_type = type;
index f513f77b1d85471ff0997f8d9f4755deca293621..ecab4ea059b4d5eec80a3f3354b5c1c2c36343c0 100644 (file)
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
                        desc->txd.callback = last->txd.callback;
                        desc->txd.callback_param = last->txd.callback_param;
                }
-               last->last = false;
+               desc->last = false;
 
                dma_cookie_assign(&desc->txd);
 
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                desc->rqcfg.brst_len = 1;
 
        desc->rqcfg.brst_len = get_burst_len(desc, len);
+       desc->bytes_requested = len;
 
        desc->txd.flags = flags;
 
index 7d2c17d8d30fc1a4f1efd9c7471bc1e5cbdc794c..6f80432a3f0a3d74bf9a4612712ad6edf4875edc 100644 (file)
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_irqsave(&vc->lock, flags);
        cookie = dma_cookie_assign(tx);
 
-       list_move_tail(&vd->node, &vc->desc_submitted);
+       list_add_tail(&vd->node, &vc->desc_submitted);
        spin_unlock_irqrestore(&vc->lock, flags);
 
        dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
                cb_data = vd->tx.callback_param;
 
                list_del(&vd->node);
-               if (async_tx_test_ack(&vd->tx))
-                       list_add(&vd->node, &vc->desc_allocated);
-               else
-                       vc->desc_free(vd);
+
+               vc->desc_free(vd);
 
                if (cb)
                        cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
        while (!list_empty(head)) {
                struct virt_dma_desc *vd = list_first_entry(head,
                        struct virt_dma_desc, node);
-               if (async_tx_test_ack(&vd->tx)) {
-                       list_move_tail(&vd->node, &vc->desc_allocated);
-               } else {
-                       dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-                       list_del(&vd->node);
-                       vc->desc_free(vd);
-               }
+               list_del(&vd->node);
+               dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+               vc->desc_free(vd);
        }
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
        dma_cookie_init(&vc->chan);
 
        spin_lock_init(&vc->lock);
-       INIT_LIST_HEAD(&vc->desc_allocated);
        INIT_LIST_HEAD(&vc->desc_submitted);
        INIT_LIST_HEAD(&vc->desc_issued);
        INIT_LIST_HEAD(&vc->desc_completed);
index 189e75dbcb15f95876a81848ecde8196a4c82905..181b95267866b605f521860f973aa3860d694fa0 100644 (file)
@@ -29,7 +29,6 @@ struct virt_dma_chan {
        spinlock_t lock;
 
        /* protected by vc.lock */
-       struct list_head desc_allocated;
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
        struct virt_dma_desc *vd, unsigned long tx_flags)
 {
        extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-       unsigned long flags;
 
        dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
        vd->tx.flags = tx_flags;
        vd->tx.tx_submit = vchan_tx_submit;
 
-       spin_lock_irqsave(&vc->lock, flags);
-       list_add_tail(&vd->node, &vc->desc_allocated);
-       spin_unlock_irqrestore(&vc->lock, flags);
-
        return &vd->tx;
 }
 
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- *                             descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
  *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
        struct list_head *head)
 {
-       list_splice_tail_init(&vc->desc_allocated, head);
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
-       struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);
 
        spin_lock_irqsave(&vc->lock, flags);
        vchan_get_all_descriptors(vc, &head);
-       list_for_each_entry(vd, &head, node)
-               async_tx_clear_ack(&vd->tx);
        spin_unlock_irqrestore(&vc->lock, flags);
 
        vchan_dma_desc_free_list(vc, &head);
index 620fd55ec7660b053a511b8237e54fc46118f407..dff22ab01851aadb37fd434a334cf6cc681cf29c 100644 (file)
 #define XGENE_DMA_MEM_RAM_SHUTDOWN             0xD070
 #define XGENE_DMA_BLK_MEM_RDY                  0xD074
 #define XGENE_DMA_BLK_MEM_RDY_VAL              0xFFFFFFFF
+#define XGENE_DMA_RING_CMD_SM_OFFSET           0x8000
 
 /* X-Gene SoC EFUSE csr register and bit defination */
 #define XGENE_SOC_JTAG1_SHADOW                 0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
                return -ENOMEM;
        }
 
+       pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
+
        /* Get efuse csr region */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
        if (!res) {
index 3515b381c1312612f56953bc267ee7d5d23b0f84..711d8ad74f116ebdcc7fd3833fbc0672c7a6359b 100644 (file)
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
         */
 
        for (row = 0; row < mci->nr_csrows; row++) {
-               struct csrow_info *csi = &mci->csrows[row];
+               struct csrow_info *csi = mci->csrows[row];
 
                /*
                 * Get the configuration settings for this
index 080d5cc2705529962d2a62b17fe3f597b5bc41e3..eebdf2a33bfe4b84e1fc1886e7222641f4a56122 100644 (file)
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
        status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
        if (status) {
                dev_err(&pdev->dev, "failed to register extcon device\n");
-               kfree(palmas_usb->edev->name);
                return status;
        }
 
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
                if (status < 0) {
                        dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                                        palmas_usb->id_irq, status);
-                       kfree(palmas_usb->edev->name);
                        return status;
                }
        }
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
                if (status < 0) {
                        dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                                        palmas_usb->vbus_irq, status);
-                       kfree(palmas_usb->edev->name);
                        return status;
                }
        }
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int palmas_usb_remove(struct platform_device *pdev)
-{
-       struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
-       kfree(palmas_usb->edev->name);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
 
 static struct platform_driver palmas_usb_driver = {
        .probe = palmas_usb_probe,
-       .remove = palmas_usb_remove,
        .driver = {
                .name = "palmas-usb",
                .of_match_table = of_palmas_match_tbl,
index 76157ab9faf3ad84a16e738a4e338ad1bded8e3c..43b57b02d050d197fe7994ea744231b7a580eb23 100644 (file)
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
        return -EINVAL;
 }
 
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 {
-       unsigned int id = EXTCON_NONE;
+       int id = -EINVAL;
        int i = 0;
 
-       if (edev->max_supported == 0)
-               return -EINVAL;
-
-       /* Find the the number of extcon cable */
+       /* Find the id of the extcon cable */
        while (extcon_name[i]) {
                if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
                        id = i;
                        break;
                }
+               i++;
        }
 
-       if (id == EXTCON_NONE)
+       return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+       int id;
+
+       if (edev->max_supported == 0)
                return -EINVAL;
 
+       /* Map the cable name to its id, then to the cable index */
+       id = find_cable_id_by_name(edev, name);
+       if (id < 0)
+               return id;
+
        return find_cable_index_by_id(edev, id);
 }
 
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
        struct extcon_cable *cable = container_of(attr, struct extcon_cable,
                                                  attr_state);
 
+       int i = cable->cable_index;
+
        return sprintf(buf, "%d\n",
                       extcon_get_cable_state_(cable->edev,
-                                              cable->cable_index));
+                                              cable->edev->supported_cable[i]));
 }
 
 /**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
        spin_lock_irqsave(&edev->lock, flags);
 
        if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+               u32 old_state;
+
                if (check_mutually_exclusive(edev, (edev->state & ~mask) |
                                                   (state & mask))) {
                        spin_unlock_irqrestore(&edev->lock, flags);
                        return -EPERM;
                }
 
-               for (index = 0; index < edev->max_supported; index++) {
-                       if (is_extcon_changed(edev->state, state, index, &attached))
-                               raw_notifier_call_chain(&edev->nh[index], attached, edev);
-               }
-
+               old_state = edev->state;
                edev->state &= ~mask;
                edev->state |= state & mask;
 
+               for (index = 0; index < edev->max_supported; index++) {
+                       if (is_extcon_changed(old_state, edev->state, index,
+                                             &attached))
+                               raw_notifier_call_chain(&edev->nh[index],
+                                                       attached, edev);
+               }
+
                /* This could be in interrupt handler */
                prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
                if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
  */
 int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
 {
-       return extcon_get_cable_state_(edev, find_cable_index_by_name
-                                               (edev, cable_name));
+       int id;
+
+       id = find_cable_id_by_name(edev, cable_name);
+       if (id < 0)
+               return id;
+
+       return extcon_get_cable_state_(edev, id);
 }
 EXPORT_SYMBOL_GPL(extcon_get_cable_state);
 
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
 int extcon_set_cable_state(struct extcon_dev *edev,
                        const char *cable_name, bool cable_state)
 {
-       return extcon_set_cable_state_(edev, find_cable_index_by_name
-                                       (edev, cable_name), cable_state);
+       int id;
+
+       id = find_cable_id_by_name(edev, cable_name);
+       if (id < 0)
+               return id;
+
+       return extcon_set_cable_state_(edev, id, cable_state);
 }
 EXPORT_SYMBOL_GPL(extcon_set_cable_state);
 
index 9fa8084a7c8d7d9e5aff23bdd372af512e39047a..d6144e3b97c54235ca45a0ad71872957a36ffe48 100644 (file)
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
 
 static int __init parse_efi_cmdline(char *str)
 {
+       if (!str) {
+               pr_warn("need at least one option\n");
+               return -EINVAL;
+       }
+
        if (parse_option_str(str, "noruntime"))
                disable_runtime = true;
 
index 31b00f91cfcd5a04848be288837d6d90c0110f44..f7b49d5ce4b81d471fa3c84280560b9d0e774c78 100644 (file)
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
        uint32_t                        me_feature_version;
        uint32_t                        ce_feature_version;
        uint32_t                        pfp_feature_version;
+       uint32_t                        rlc_feature_version;
+       uint32_t                        mec_feature_version;
+       uint32_t                        mec2_feature_version;
        struct amdgpu_ring              gfx_ring[AMDGPU_MAX_GFX_RINGS];
        unsigned                        num_gfx_rings;
        struct amdgpu_ring              compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1639,6 +1642,7 @@ struct amdgpu_sdma {
        /* SDMA firmware */
        const struct firmware   *fw;
        uint32_t                fw_version;
+       uint32_t                feature_version;
 
        struct amdgpu_ring      ring;
 };
index 9736892bcdf932c328a883473a6c3e23d560cd38..3bfe67de834904628e0e4e11677c706c4848fde7 100644 (file)
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_INFO_FW_GFX_RLC:
                        fw_info.ver = adev->gfx.rlc_fw_version;
-                       fw_info.feature = 0;
+                       fw_info.feature = adev->gfx.rlc_feature_version;
                        break;
                case AMDGPU_INFO_FW_GFX_MEC:
-                       if (info->query_fw.index == 0)
+                       if (info->query_fw.index == 0) {
                                fw_info.ver = adev->gfx.mec_fw_version;
-                       else if (info->query_fw.index == 1)
+                               fw_info.feature = adev->gfx.mec_feature_version;
+                       } else if (info->query_fw.index == 1) {
                                fw_info.ver = adev->gfx.mec2_fw_version;
-                       else
+                               fw_info.feature = adev->gfx.mec2_feature_version;
+                       } else
                                return -EINVAL;
-                       fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_SMC:
                        fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        if (info->query_fw.index >= 2)
                                return -EINVAL;
                        fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-                       fw_info.feature = 0;
+                       fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
                        break;
                default:
                        return -EINVAL;
index 2f7a5efa21c23ab0fda25ee0ebbb360efae966ea..f5c22556ec2c17ff145c48440dfe5e3563e67606 100644 (file)
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
        unsigned height_in_mb = ALIGN(height / 16, 2);
        unsigned fs_in_mb = width_in_mb * height_in_mb;
 
-       unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+       unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
 
        image_size = width * height;
        image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
                num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
                min_dpb_size = image_size * num_dpb_buffer;
+               min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+                                          * 16 * num_dpb_buffer + 52 * 1024;
                break;
 
        default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
+       buf_sizes[0x4] = min_ctx_size;
        return 0;
 }
 
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
                        return -EINVAL;
                }
 
+       } else if (cmd == 0x206) {
+               if ((end - start) < ctx->buf_sizes[4]) {
+                       DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+                                         (unsigned)(end - start),
+                                         ctx->buf_sizes[4]);
+                       return -EINVAL;
+               }
        } else if ((cmd != 0x100) && (cmd != 0x204)) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
        struct amdgpu_uvd_cs_ctx ctx = {};
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
-               [0x00000001]    =       32 * 1024 * 1024,
-               [0x00000002]    =       2048 * 1152 * 3,
+               [0x00000001]    =       0xFFFFFFFF,
+               [0x00000002]    =       0xFFFFFFFF,
                [0x00000003]    =       2048,
+               [0x00000004]    =       0xFFFFFFFF,
        };
        struct amdgpu_ib *ib = &parser->ibs[ib_idx];
        int r;
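
Illustration (not from the commit): the min_ctx_size floor added above, evaluated for a hypothetical 3840x2160 stream with 9 DPB buffers. The numbers are chosen for the example, not taken from the diff.

    #include <stdio.h>

    int main(void)
    {
        unsigned width = 3840, height = 2160, num_dpb_buffer = 9;
        unsigned min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
                                * 16 * num_dpb_buffer + 52 * 1024;

        /* 255 * 150 * 16 * 9 + 53248 = 5561248 bytes (~5.3 MiB) */
        printf("min_ctx_size = %u bytes\n", min_ctx_size);
        return 0;
    }
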
index ab83cc1ca4cc04865b0bf918c410a4351496fb22..15df46c93f0a3d9e0810b9018ba761bfaa2cc418 100644 (file)
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
index 2db6ab0a543dada20b64d3d5d89eb4d5f36a1873..0d8bf2cb195603b8be90346a58eabfee62670d23 100644 (file)
@@ -3080,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
        adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+       adev->gfx.mec_feature_version = le32_to_cpu(
+                                       mec_hdr->ucode_feature_version);
 
        gfx_v7_0_cp_compute_enable(adev, false);
 
@@ -3102,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
                mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
                adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+               adev->gfx.mec2_feature_version = le32_to_cpu(
+                               mec2_hdr->ucode_feature_version);
 
                /* MEC2 */
                fw_data = (const __le32 *)
@@ -4066,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
        amdgpu_ucode_print_rlc_hdr(&hdr->header);
        adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+       adev->gfx.rlc_feature_version = le32_to_cpu(
+                                       hdr->ucode_feature_version);
 
        gfx_v7_0_rlc_stop(adev);
 
@@ -5122,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
                dev_info(adev->dev, "  CP_HPD_EOP_CONTROL=0x%08X\n",
                         RREG32(mmCP_HPD_EOP_CONTROL));
 
-               for (queue = 0; queue < 8; i++) {
+               for (queue = 0; queue < 8; queue++) {
                        cik_srbm_select(adev, me, pipe, queue, 0);
                        dev_info(adev->dev, "  queue: %d\n", queue);
                        dev_info(adev->dev, "  CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
index 9e1d4ddbf475027e10c6e0d6d77a63efb4eec3b3..20e2cfd521d5352202070f357de89234175cb800 100644 (file)
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
+       const struct gfx_firmware_header_v1_0 *cp_hdr;
 
        DRM_DEBUG("\n");
 
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+       adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+       adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+       adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+       adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+       adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
        err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
+               cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+                                               adev->gfx.mec2_fw->data;
+               adev->gfx.mec2_fw_version = le32_to_cpu(
+                                               cp_hdr->header.ucode_version);
+               adev->gfx.mec2_feature_version = le32_to_cpu(
+                                               cp_hdr->ucode_feature_version);
        } else {
                err = 0;
                adev->gfx.mec2_fw = NULL;
@@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                adev->gfx.config.max_shader_engines = 1;
                adev->gfx.config.max_tile_pipes = 2;
                adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
 
                switch (adev->pdev->revision) {
                case 0xc4:
@@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                case 0xcc:
                        /* B10 */
                        adev->gfx.config.max_cu_per_sh = 8;
-                       adev->gfx.config.max_backends_per_se = 2;
                        break;
                case 0xc5:
                case 0x81:
@@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                case 0xcd:
                        /* B8 */
                        adev->gfx.config.max_cu_per_sh = 6;
-                       adev->gfx.config.max_backends_per_se = 2;
                        break;
                case 0xc6:
                case 0xca:
                case 0xce:
                        /* B6 */
                        adev->gfx.config.max_cu_per_sh = 6;
-                       adev->gfx.config.max_backends_per_se = 2;
                        break;
                case 0xc7:
                case 0x87:
@@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                default:
                        /* B4 */
                        adev->gfx.config.max_cu_per_sh = 4;
-                       adev->gfx.config.max_backends_per_se = 1;
                        break;
                }
 
@@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 
        hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        amdgpu_ucode_print_rlc_hdr(&hdr->header);
-       adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
                           le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
        amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
        amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
        amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
-       adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
-       adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
-       adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
-       adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
-       adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
-       adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
 
        gfx_v8_0_cp_gfx_enable(adev, false);
 
@@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 
        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
-       adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
 
        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
@@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 
                mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
-               adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
 
                fw_data = (const __le32 *)
                        (adev->gfx.mec2_fw->data +
@@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                               0x7FFFF << 2);
+                                      AMDGPU_DOORBELL_MEC_RING7 << 2);
                        }
                        tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
                        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
index d7895885fe0cf3b3e7cd5d1ae52f291053420b17..a988dfb1d3942e9246361bfd7b97bdabc5e5286c 100644 (file)
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
        int err, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
+       const struct sdma_firmware_header_v1_0 *hdr;
 
        DRM_DEBUG("\n");
 
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
                err = amdgpu_ucode_validate(adev->sdma[i].fw);
                if (err)
                        goto out;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
                        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                        amdgpu_ucode_print_sdma_hdr(&hdr->header);
                        fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-                       adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
                        fw_data = (const __le32 *)
                                (adev->sdma[i].fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
index 7bb37b93993fb5312eb2d46189bf09bf789c3989..2b86569b18d3656c87975175a1ff771599c958d6 100644 (file)
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        int err, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
+       const struct sdma_firmware_header_v1_0 *hdr;
 
        DRM_DEBUG("\n");
 
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
                err = amdgpu_ucode_validate(adev->sdma[i].fw);
                if (err)
                        goto out;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
index 5b59d5ad7d1c23fc41c6d657f19367b81d1ae1de..9dcc7280e5720255baed2786ab7d8fc11554c845 100644 (file)
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        }
 
        funcs = connector->helper_private;
-       new_encoder = funcs->best_encoder(connector);
+
+       if (funcs->atomic_best_encoder)
+               new_encoder = funcs->atomic_best_encoder(connector,
+                                                        connector_state);
+       else
+               new_encoder = funcs->best_encoder(connector);
 
        if (!new_encoder) {
                DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                }
        }
 
+       if (WARN_ON(!connector_state->crtc))
+               return -EINVAL;
+
        connector_state->best_encoder = new_encoder;
        idx = drm_crtc_index(connector_state->crtc);
 
index 778bbb6425b80c9c8affddad58d993a93755c39e..eb603f1defc2250ea158864ea4371e24655138e1 100644 (file)
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
                   from an EDID retrieval */
                if (port->connector) {
                        mutex_lock(&mgr->destroy_connector_lock);
-                       list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+                       list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
+                       return;
                }
                drm_dp_port_teardown_pdt(port, port->pdt);
 
@@ -1294,7 +1295,6 @@ retry:
                                goto retry;
                        }
                        DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
-                       WARN(1, "fail\n");
 
                        return -EIO;
                }
@@ -2660,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
-       struct drm_connector *connector;
+       struct drm_dp_mst_port *port;
 
        /*
         * Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
         */
        for (;;) {
                mutex_lock(&mgr->destroy_connector_lock);
-               connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
-               if (!connector) {
+               port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+               if (!port) {
                        mutex_unlock(&mgr->destroy_connector_lock);
                        break;
                }
-               list_del(&connector->destroy_list);
+               list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
-               mgr->cbs->destroy_connector(mgr, connector);
+               mgr->cbs->destroy_connector(mgr, port->connector);
+
+               drm_dp_port_teardown_pdt(port, port->pdt);
+
+               if (!port->input && port->vcpi.vcpi > 0)
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+               kfree(port);
        }
 }
 
index f9cc68fbd2a3e18b076ad2ced71b8f1ddf002202..b50fa0afd9071f6c64c36de23253a2ee22ce7480 100644 (file)
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
 static void store_vblank(struct drm_device *dev, int crtc,
-                        unsigned vblank_count_inc,
+                        u32 vblank_count_inc,
                         struct timeval *t_vblank)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
index 842d6b8dc3c435ee7d836402d4169847aef75d31..2a652359af644b51f257cde7528d70b6016897da 100644 (file)
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
 
-       pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
        ret = exynos_drm_ippdrv_register(ippdrv);
index 8040ed2a831f9a6f226baf8aee3ce00b213be8e6..f1c6b76c127f4db02388267775431fcd25ac7eb8 100644 (file)
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
index 99e286489031c4a2931565823e0158428548aef2..4a00990e4ae4e8459b94a9a007044af1cc11af62 100644 (file)
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
 {
        struct hdmi_context *hdata = ctx_from_connector(connector);
        struct edid *edid;
+       int ret;
 
        if (!hdata->ddc_adpt)
                return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
 
-       return drm_add_edid_modes(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+
+       kfree(edid);
+
+       return ret;
 }
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
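
Illustration (not from the commit): the hdmi_get_modes() hunk above fixes an EDID leak — drm_get_edid() returns a caller-owned buffer, and the old code returned drm_add_edid_modes() directly without freeing it. A minimal userspace sketch of the capture-then-free pattern, with stand-ins for the DRM calls:

    #include <stdlib.h>

    struct edid { int n_modes; };

    static struct edid *get_edid(void)       /* stand-in: caller must free */
    {
        return calloc(1, sizeof(struct edid));
    }

    static int add_modes(struct edid *e)     /* stand-in for drm_add_edid_modes */
    {
        return e->n_modes;
    }

    static int get_modes(void)
    {
        struct edid *edid = get_edid();
        int ret;

        if (!edid)
            return -1;

        ret = add_modes(edid);
        free(edid);              /* previously leaked on every call */
        return ret;
    }

    int main(void)
    {
        return get_modes();
    }
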
index cae98db3306205e2628b2090b731cf6cfdf79d4f..4706b56902b44f5ba205b30d3aa6e53678bbad52 100644 (file)
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
        /* handling VSYNC */
        if (val & MXR_INT_STATUS_VSYNC) {
+               /* the vsync interrupt uses different bits for read and clear */
+               val |= MXR_INT_CLEAR_VSYNC;
+               val &= ~MXR_INT_STATUS_VSYNC;
+
                /* interlace scan need to check shadow register */
                if (ctx->interlace) {
                        base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 out:
        /* clear interrupts */
-       if (~val & MXR_INT_EN_VSYNC) {
-               /* vsync interrupt use different bit for read and clear */
-               val &= ~MXR_INT_EN_VSYNC;
-               val |= MXR_INT_CLEAR_VSYNC;
-       }
        mixer_reg_write(res, MXR_INT_STATUS, val);
 
        spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
        }
 
        /* enable vsync interrupt */
-       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-                       MXR_INT_EN_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
 
        return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
        struct mixer_context *mixer_ctx = crtc->ctx;
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
+       if (!mixer_ctx->powered) {
+               mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+               return;
+       }
+
        /* disable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
+       if (ctx->int_en & MXR_INT_EN_VSYNC)
+               mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 }
index fe1599d75f14e39b2a39364b78d088d4715c368f..424228be79ae5b2aa1557ca07331e4e49e665ef8 100644 (file)
@@ -606,8 +606,6 @@ static void
 tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
                 uint8_t *buf, size_t size)
 {
-       buf[PB(0)] = tda998x_cksum(buf, size);
-
        reg_clear(priv, REG_DIP_IF_FLAGS, bit);
        reg_write_range(priv, addr, buf, size);
        reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
        buf[PB(4)] = p->audio_frame[4];
        buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
 
+       buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
        tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
                         sizeof(buf));
 }
index 7ed8033aae6097af69d90e83bb6c97f7dc6f7225..8e35e0d013df556d8ac04fc27f9ba2bd7354fae3 100644 (file)
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state,
                        bool async)
 {
-       int ret;
-       int i;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int ret, i;
 
        if (async) {
                DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
                return ret;
 
        /* Point of no return */
-
-       /*
-        * FIXME:  The proper sequence here will eventually be:
-        *
-        * drm_atomic_helper_swap_state(dev, state)
-        * drm_atomic_helper_commit_modeset_disables(dev, state);
-        * drm_atomic_helper_commit_planes(dev, state);
-        * drm_atomic_helper_commit_modeset_enables(dev, state);
-        * drm_atomic_helper_wait_for_vblanks(dev, state);
-        * drm_atomic_helper_cleanup_planes(dev, state);
-        * drm_atomic_state_free(state);
-        *
-        * once we have full atomic modeset.  For now, just manually update
-        * plane states to avoid clobbering good states with dummy states
-        * while nuclear pageflipping.
-        */
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
-
-               if (!plane)
-                       continue;
-
-               plane->state->state = state;
-               swap(state->plane_states[i], plane->state);
-               plane->state->state = NULL;
-       }
+       drm_atomic_helper_swap_state(dev, state);
 
        /* swap crtc_scaler_state */
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc) {
-                       continue;
-               }
-
-               to_intel_crtc(crtc)->config->scaler_state =
-                       to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
                if (INTEL_INFO(dev)->gen >= 9)
                        skl_detach_scalers(to_intel_crtc(crtc));
+
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
-       drm_atomic_helper_commit_planes(dev, state);
        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_state_free(state);
index 198fc3c3291b2ac05540ea36ef853c9826b23efa..3dcd59e694db9e6f32c8e49ea04cbf21bbdc0ad8 100644 (file)
@@ -1075,15 +1075,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        const union child_device_config *p_child;
        union child_device_config *child_dev_ptr;
        int i, child_device_num, count;
-       u16     block_size;
+       u8 expected_size;
+       u16 block_size;
 
        p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!p_defs) {
                DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
                return;
        }
-       if (p_defs->child_dev_size < sizeof(*p_child)) {
-               DRM_ERROR("General definiton block child device size is too small.\n");
+       if (bdb->version < 195) {
+               expected_size = 33;
+       } else if (bdb->version == 195) {
+               expected_size = 37;
+       } else if (bdb->version <= 197) {
+               expected_size = 38;
+       } else {
+               expected_size = 38;
+               DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
+                                bdb->version, expected_size);
+       }
+
+       if (expected_size > sizeof(*p_child)) {
+               DRM_ERROR("child_device_config cannot fit in p_child\n");
+               return;
+       }
+
+       if (p_defs->child_dev_size != expected_size) {
+               DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
+                         p_defs->child_dev_size, expected_size, bdb->version);
                return;
        }
        /* get the block size of general definitions */
@@ -1130,7 +1149,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
                child_dev_ptr = dev_priv->vbt.child_dev + count;
                count++;
-               memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+               memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
        }
        return;
 }
index 30e0f54ba19d1284107958bb6e5d49f6309b63de..87476ff181ddbef0967d948c37119cfcbd758315 100644 (file)
@@ -11826,7 +11826,9 @@ encoder_retry:
                goto encoder_retry;
        }
 
-       pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+       /* Dithering seems not to pass through bits correctly when it should, so
+        * only enable it on 6bpc panels. */
+       pipe_config->dither = pipe_config->pipe_bpp == 6*3;
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 
        modeset_update_crtc_power_domains(state);
 
-       drm_atomic_helper_commit_planes(dev, state);
-
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc->state) || !crtc->state->enable)
+               if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+                       drm_atomic_helper_commit_planes_on_crtc(crtc_state);
                        continue;
+               }
 
                update_scanline_offset(to_intel_crtc(crtc));
 
                dev_priv->display.crtc_enable(crtc);
-               intel_crtc_enable_planes(crtc);
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
        /* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        return 0;
 }
 
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(crtc->primary->state);
-
-       return plane_state->visible;
-}
-
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
        struct drm_atomic_state *state = NULL;
        struct intel_crtc_state *pipe_config;
-       bool primary_plane_was_visible;
        int ret;
 
        BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
        intel_update_pipe_size(to_intel_crtc(set->crtc));
 
-       primary_plane_was_visible = primary_plane_visible(set->crtc);
-
        ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
 
-       if (ret == 0 &&
-           pipe_config->base.enable &&
-           pipe_config->base.planes_changed &&
-           !needs_modeset(&pipe_config->base)) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
-               /*
-                * We need to make sure the primary plane is re-enabled if it
-                * has previously been turned off.
-                */
-               if (ret == 0 && !primary_plane_was_visible &&
-                   primary_plane_visible(set->crtc)) {
-                       WARN_ON(!intel_crtc->active);
-                       intel_post_enable_primary(set->crtc);
-               }
-
-               /*
-                * In the fastboot case this may be our only check of the
-                * state after boot.  It would be better to only do it on
-                * the first update, but we don't have a nice way of doing that
-                * (and really, set_config isn't used much for high freq page
-                * flipping, so increasing its cost here shouldn't be a big
-                * deal).
-                */
-               if (i915.fastboot && ret == 0)
-                       intel_modeset_check_state(set->crtc->dev);
-       }
-
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
                              set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
                         */
                        if (IS_BROADWELL(dev))
                                intel_crtc->atomic.wait_vblank = true;
+
+                       if (crtc_state)
+                               intel_crtc->atomic.post_enable_primary = true;
                }
 
                /*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
                if (!state->visible || !fb)
                        intel_crtc->atomic.disable_ips = true;
 
+               if (!state->visible && old_state->visible &&
+                   crtc_state && !needs_modeset(&crtc_state->base))
+                       intel_crtc->atomic.pre_disable_primary = true;
+
                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                struct intel_plane_state *plane_state;
 
                memset(crtc->config, 0, sizeof(*crtc->config));
+               crtc->config->base.crtc = &crtc->base;
 
                crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
index 6e4cc5334f47d7105b60c0bec72fccfb4875ddac..600afdbef8c9a434f51d527c5d85e202c36bae2b 100644 (file)
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+                                                        struct drm_connector_state *state)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_dp *intel_dp = intel_connector->mst_port;
+       struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+       return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
 static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
 static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
        .get_modes = intel_dp_mst_get_modes,
        .mode_valid = intel_dp_mst_mode_valid,
+       .atomic_best_encoder = intel_mst_atomic_best_encoder,
        .best_encoder = intel_mst_best_encoder,
 };
 
index 52c22b02600598cfa7d18e424d69a99cce4879e7..e10f9644140f5d9fcd6e73446c74634d2b13906a 100644 (file)
@@ -165,31 +165,15 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
-       struct nvkm_object *obj = (void *)chan;
-       struct gk104_fifo_priv *priv = (void *)obj->engine;
-
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
+       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
-       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       ret = gk104_fifo_chan_kick(chan);
-       if (ret && suspend)
-               return ret;
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               if (suspend)
+                       return -EBUSY;
+       }
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
-       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
index 654c8daeb5ab3d0dd84a2ed1d32af633d6955ac9..97ad3bcb99a75a441a54150f779415dc59236ac7 100644 (file)
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        vmw_resource_relocations_free(&sw_context->res_relocations);
 
        vmw_fifo_commit(dev_priv, command_size);
+       mutex_unlock(&dev_priv->binding_mutex);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
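
Illustration (not from the commit): the vmw_execbuf_process() hunk above narrows the binding_mutex critical section, unlocking right after vmw_fifo_commit() instead of holding the mutex across fence submission. A toy pthread sketch of the same shape:

    #include <pthread.h>

    static pthread_mutex_t binding_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void commit(void)         { /* work that needs the lock */ }
    static void fence_and_sync(void) { /* follow-up that does not */ }

    int main(void)
    {
        pthread_mutex_lock(&binding_mutex);
        commit();
        pthread_mutex_unlock(&binding_mutex);  /* moved up, like the hunk */
        fence_and_sync();                      /* now runs unlocked */
        return 0;
    }
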
index 3511bbaba505a4524ad382297ec1e486e21e7e48..e3c63640df737d5527c6d2609622417a4e03c8d3 100644 (file)
@@ -462,12 +462,15 @@ out:
 
 static void hidinput_cleanup_battery(struct hid_device *dev)
 {
+       const struct power_supply_desc *psy_desc;
+
        if (!dev->battery)
                return;
 
+       psy_desc = dev->battery->desc;
        power_supply_unregister(dev->battery);
-       kfree(dev->battery->desc->name);
-       kfree(dev->battery->desc);
+       kfree(psy_desc->name);
+       kfree(psy_desc);
        dev->battery = NULL;
 }
 #else  /* !CONFIG_HID_BATTERY_STRENGTH */
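
Illustration (not from the commit): hidinput_cleanup_battery() now saves the desc pointer before calling power_supply_unregister(), which may free dev->battery and everything reached through it. A minimal sketch of the save-before-free pattern; the types here are hypothetical stand-ins:

    #include <stdlib.h>
    #include <string.h>

    struct desc { char *name; };
    struct battery { struct desc *desc; };

    /* Stand-in for power_supply_unregister(): may free the object itself. */
    static void unregister_battery(struct battery *b)
    {
        free(b);
    }

    static void cleanup(struct battery *b)
    {
        struct desc *d = b->desc;   /* save before *b can be freed */

        unregister_battery(b);
        free(d->name);              /* safe: goes through the saved pointer */
        free(d);
    }

    int main(void)
    {
        struct battery *b = malloc(sizeof(*b));

        b->desc = malloc(sizeof(*b->desc));
        b->desc->name = strdup("bat0");
        cleanup(b);
        return 0;
    }
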
index 94167310e15a4c95001d339d4da3b7319f9005eb..b905d501e752d607b6fc1731ad89ff3d23ce7cd0 100644 (file)
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
        for (p = drvdata->rdesc;
             p <= drvdata->rdesc + drvdata->rsize - 4;) {
                if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
-                   p[3] < sizeof(params)) {
+                   p[3] < ARRAY_SIZE(params)) {
                        v = params[p[3]];
                        put_unaligned(cpu_to_le32(v), (s32 *)p);
                        p += 4;
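
Illustration (not from the commit): the hid-uclogic hunk above swaps sizeof() for ARRAY_SIZE() in a bounds check — for arrays of multi-byte elements, sizeof() counts bytes rather than entries, so the old check admitted out-of-range indices. (The turbografx hunk further down makes a related change, replacing a hard-coded bound with ARRAY_SIZE().) A standalone example with an assumed four-entry int table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int params[4] = { 0 };

        printf("sizeof(params)     = %zu\n", sizeof(params));     /* 16 */
        printf("ARRAY_SIZE(params) = %zu\n", ARRAY_SIZE(params)); /* 4  */
        /* An index check against sizeof() would admit 4..15: out of bounds. */
        return 0;
    }
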
index 44958d79d598dfc3a7e6938a2babbf3e1fdc2188..01b937e63cf37ec1424a1aad9eee0caef682c010 100644 (file)
@@ -1284,6 +1284,39 @@ fail_register_pen_input:
        return error;
 }
 
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the defaults from the hardcoded logical dimensions
+ * and resolution before the driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+       if (features->x_resolution) {
+               features->x_phy = (features->x_max * 100) /
+                                       features->x_resolution;
+               features->y_phy = (features->y_max * 100) /
+                                       features->y_resolution;
+       }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+       /* set unit to "100th of a mm" for devices not reported by HID */
+       if (!features->unit) {
+               features->unit = 0x11;
+               features->unitExpo = -3;
+       }
+
+       features->x_resolution = wacom_calc_hid_res(features->x_max,
+                                                   features->x_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+       features->y_resolution = wacom_calc_hid_res(features->y_max,
+                                                   features->y_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+}
+
 static void wacom_wireless_work(struct work_struct *work)
 {
        struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
                if (wacom_wac1->features.type != INTUOSHT &&
                    wacom_wac1->features.type != BAMBOO_PT)
                        wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+               wacom_set_default_phy(&wacom_wac1->features);
+               wacom_calculate_res(&wacom_wac1->features);
                snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
                         wacom_wac1->features.name);
                snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
                        wacom_wac2->features =
                                *((struct wacom_features *)id->driver_data);
                        wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+                       wacom_set_default_phy(&wacom_wac2->features);
                        wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+                       wacom_calculate_res(&wacom_wac2->features);
                        snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
                                 "%s (WL) Finger",wacom_wac2->features.name);
                        snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
        }
 }
 
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
-       if (features->x_resolution) {
-               features->x_phy = (features->x_max * 100) /
-                                       features->x_resolution;
-               features->y_phy = (features->y_max * 100) /
-                                       features->y_resolution;
-       }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
-       /* set unit to "100th of a mm" for devices not reported by HID */
-       if (!features->unit) {
-               features->unit = 0x11;
-               features->unitExpo = -3;
-       }
-
-       features->x_resolution = wacom_calc_hid_res(features->x_max,
-                                                   features->x_phy,
-                                                   features->unit,
-                                                   features->unitExpo);
-       features->y_resolution = wacom_calc_hid_res(features->y_max,
-                                                   features->y_phy,
-                                                   features->unit,
-                                                   features->unitExpo);
-}
-
 static size_t wacom_compute_pktlen(struct hid_device *hdev)
 {
        struct hid_report_enum *report_enum;
index 37c16afe007a0524eaacb5edcae9399bebfae897..c8487894b31236cefd761b24cac48fb4e17e6d52 100644 (file)
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8100
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8100",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+               },
+       },
+       { }
+};
+
 /*
  * Probe for the presence of a supported laptop.
  */
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table) ||
+           dmi_check_system(i8k_blacklist_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
 
index 9b55e673b67caf1365c7452ce51a22a37510af02..85d106fe3ce8628061901b53240e546a884cbea0 100644 (file)
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
        { .compatible = "gmt,g763" },
        { },
 };
+MODULE_DEVICE_TABLE(of, g762_dt_match);
 
 /*
  * Grab clock (a required property), enable it, get (fixed) clock frequency
index 6153df735e82ca4fd3d605e159510675410546fc..08ff89d222e5ff79a3c5cf37fa1b7729f70fe303 100644 (file)
@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
        {"nct7904", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
 
 static struct i2c_driver nct7904_driver = {
        .class = I2C_CLASS_HWMON,
index af162b4c7a6d9b8b30756c53147eb3de458b3d5e..025686d4164058498216862d37af9ad114fe4fa2 100644 (file)
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, iface);
 
-       dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, "
+       dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
                "regs_base@%p\n", iface->regs_base);
 
        return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
 module_exit(i2c_bfin_twi_exit);
 
 MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
-MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
+MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:i2c-bfin-twi");
index d1c22e3fdd146a34d96adcdd35b81a6e15984824..fc9bf7f30e355dfadfcadd4f7d8f187f818e9566 100644 (file)
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
        u32 reg;
 
        reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+       /* enable test mode */
        reg |= OMAP_I2C_SYSTEST_ST_EN;
+       /* select SDA/SCL IO mode */
+       reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
+       /* set SCL to high-impedance state (reset value is 0) */
+       reg |= OMAP_I2C_SYSTEST_SCL_O;
+       /* set SDA to high-impedance state (reset value is 0) */
+       reg |= OMAP_I2C_SYSTEST_SDA_O;
        omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
 }
 
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
        u32 reg;
 
        reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+       /* restore reset values */
        reg &= ~OMAP_I2C_SYSTEST_ST_EN;
+       reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
+       reg &= ~OMAP_I2C_SYSTEST_SCL_O;
+       reg &= ~OMAP_I2C_SYSTEST_SDA_O;
        omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
 }
 
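Illustration (not from the commit): the two omap_i2c recovery hunks above are a matched read-modify-write pair — prepare_recovery() ORs in the test-mode and line-state bits, and unprepare_recovery() clears exactly the same set to restore the reset values. A toy sketch of that pairing; the bit positions below are assumed for the example, the real OMAP_I2C_SYSTEST_* definitions live in the driver header and are not shown here:

    #include <assert.h>
    #include <stdint.h>

    #define ST_EN   (1u << 15)   /* illustrative positions only */
    #define TMODE   (3u << 12)
    #define SCL_O   (1u << 2)
    #define SDA_O   (1u << 0)

    int main(void)
    {
        uint32_t reg = 0;

        reg |= ST_EN | TMODE | SCL_O | SDA_O;     /* prepare_recovery   */
        reg &= ~(ST_EN | TMODE | SCL_O | SDA_O);  /* unprepare_recovery */
        assert(reg == 0);                         /* reset state restored */
        return 0;
    }
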
index e6d4935161e4902762f6042847838428ec34faf2..c83e4d13cfc5c402dfdea64df08f399ab486822b 100644 (file)
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
        if (bri->prepare_recovery)
                bri->prepare_recovery(adap);
 
+       bri->set_scl(adap, val);
+       ndelay(RECOVERY_NDELAY);
+
        /*
         * By this time SCL is high, as we need to give 9 falling-rising edges
         */
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
 
 int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 {
-       adap->bus_recovery_info->set_scl(adap, 1);
        return i2c_generic_recovery(adap);
 }
 EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
 struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
 {
        struct device *dev;
+       struct i2c_client *client;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, node,
-                                        of_dev_node_match);
+       dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
        if (!dev)
                return NULL;
 
-       return i2c_verify_client(dev);
+       client = i2c_verify_client(dev);
+       if (!client)
+               put_device(dev);
+
+       return client;
 }
 EXPORT_SYMBOL(of_find_i2c_device_by_node);
 
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
 struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
 {
        struct device *dev;
+       struct i2c_adapter *adapter;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, node,
-                                        of_dev_node_match);
+       dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
        if (!dev)
                return NULL;
 
-       return i2c_verify_adapter(dev);
+       adapter = i2c_verify_adapter(dev);
+       if (!adapter)
+               put_device(dev);
+
+       return adapter;
 }
 EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
 #else
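
Illustration (not from the commit): bus_find_device() returns a device with an elevated reference count, and the two i2c-core hunks above drop that reference when i2c_verify_client()/i2c_verify_adapter() rejects the device instead of leaking it. A toy refcounting model of the rule (simplified counters, not the real driver-core API):

    struct device { int refcount; };

    static struct device *find_device(struct device *d) /* lookup takes a ref */
    {
        d->refcount++;
        return d;
    }

    static void put_device(struct device *d)             /* drop a ref */
    {
        d->refcount--;
    }

    static void *verify(struct device *d)                /* may reject the match */
    {
        (void)d;
        return 0;               /* pretend verification fails */
    }

    static void *find_and_verify(struct device *d)
    {
        struct device *dev = find_device(d);
        void *obj = verify(dev);

        if (!obj)
            put_device(dev);    /* don't leak the lookup's reference */
        return obj;
    }

    int main(void)
    {
        struct device d = { 0 };

        find_and_verify(&d);
        return d.refcount;      /* 0: balanced even on failure */
    }
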
index 8223746546093c7a08f4bdfc8425459d88fe1a52..1da44961477953038e78409169f80a3f4884f89a 100644 (file)
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count > attr->size)
-               return -EFBIG;
-
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
 
        spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count > attr->size)
-               return -EFBIG;
-
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
 
        spin_lock_irqsave(&eeprom->buffer_lock, flags);
index c7aab48f07cdfcdebf3efb6374416619c9095e04..92d518382a9fce90c3e1dbae45034675072da274 100644 (file)
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
-                       ret = -EINVAL;
+                       wc->status = IB_WC_FATAL_ERR;
                }
        }
 out:
index 27b6a3ce18caf2e996177e6c313fff2b21a4ad19..891797ad76bccda3ae132e1fc59483b539e522ee 100644 (file)
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
                if (n_buttons[i] < 1)
                        continue;
 
-               if (n_buttons[i] > 6) {
+               if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
                        printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
                        err = -EINVAL;
                        goto err_unreg_devs;
index 10e140af5aac1a9ea309d2b237af065cc7abf684..1ac898db303afe84edd003a03129eb0f27518837 100644 (file)
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
 MODULE_DESCRIPTION("axp20x Power Button");
 MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:axp20x-pek");
index fc17b9592f5435238d980cc30d266a4ee399415a..10c4e3d462f112f15ec9843093c5f988d44780b9 100644 (file)
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
        if (pdata && pdata->coexist)
                return true;
 
-       if (of_find_node_by_name(node, "codec")) {
+       node = of_find_node_by_name(node, "codec");
+       if (node) {
                of_node_put(node);
                return true;
        }
index 113d6f1516a54956f74635f7eb51231ab5490052..4d246861d692b810f3074aa7917cda86893ac6c2 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
+#include <linux/dmi.h>
 
 #include "psmouse.h"
 #include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
+#define ALPS_DELL              0x100   /* device is a Dell laptop */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                return;
        }
 
-       /* Non interleaved V2 dualpoint has separate stick button bits */
+       /* Dell non interleaved V2 dualpoint has separate stick button bits */
        if (priv->proto_version == ALPS_PROTO_V2 &&
-           priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+           priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
                left |= packet[0] & 1;
                right |= packet[0] & 2;
                middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
        priv->byte0 = protocol->byte0;
        priv->mask0 = protocol->mask0;
        priv->flags = protocol->flags;
+       if (dmi_name_in_vendors("Dell"))
+               priv->flags |= ALPS_DELL;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
index 22b9ca901f4e96c22499ce9723c0d2bd897c6fbb..2955f1d0ca6c4c9137f786028ca36bff706beab2 100644 (file)
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        struct elantech_data *etd = psmouse->private;
        unsigned char *packet = psmouse->packet;
        unsigned char packet_type = packet[3] & 0x03;
+       unsigned int ic_version;
        bool sanity_check;
 
        if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
                return PACKET_TRACKPOINT;
 
+       /* This represents the version of the IC body. */
+       ic_version = (etd->fw_version & 0x0f0000) >> 16;
+
        /*
         * Sanity check based on the constant bits of a packet.
         * The constant bits change depending on the value of
-        * the hardware flag 'crc_enabled' but are the same for
-        * every packet, regardless of the type.
+        * the hardware flag 'crc_enabled' and the version of
+        * the IC body, but are the same for every packet,
+        * regardless of the type.
         */
        if (etd->crc_enabled)
                sanity_check = ((packet[3] & 0x08) == 0x00);
+       else if (ic_version == 7 && etd->samples[1] == 0x2A)
+               sanity_check = ((packet[3] & 0x1c) == 0x10);
        else
                sanity_check = ((packet[0] & 0x0c) == 0x04 &&
                                (packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+ * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
@@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
                     etd->capabilities[0], etd->capabilities[1],
                     etd->capabilities[2]);
 
+       if (etd->hw_version != 1) {
+               if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
+                       psmouse_err(psmouse, "failed to query sample data\n");
+                       goto init_fail;
+               }
+               psmouse_info(psmouse,
+                            "Elan sample query result %02x, %02x, %02x\n",
+                            etd->samples[0], etd->samples[1], etd->samples[2]);
+       }
+
        if (elantech_set_absolute_mode(psmouse)) {
                psmouse_err(psmouse,
                            "failed to put touchpad into absolute mode.\n");
index f965d1569cc338059cdd540bad44ed927c6ddc3e..e1cbf409d9c8d0d4e7d21e13d57851ae6565b535 100644 (file)
@@ -129,6 +129,7 @@ struct elantech_data {
        unsigned char reg_26;
        unsigned char debug;
        unsigned char capabilities[3];
+       unsigned char samples[3];
        bool paritycheck;
        bool jumpy_cursor;
        bool reports_pressure;
index b7d54d428b5e55d1520d52e68b95202593cf4b53..ff4be0515a0dc7dbb206ae0a84968f922817101e 100644 (file)
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index 06022952a43780a6048f1890486cd1e713eab43b..bbef98e7a16efb438ef38856d8e043397f674804 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mISDNif.h>
 #include <linux/mISDNdsp.h>
 #include <linux/export.h>
+#include <linux/bitrev.h>
 #include "core.h"
 #include "dsp.h"
 
@@ -137,27 +138,14 @@ static unsigned char linear2ulaw(short sample)
        return ulawbyte;
 }
 
-static int reverse_bits(int i)
-{
-       int z, j;
-       z = 0;
-
-       for (j = 0; j < 8; j++) {
-               if ((i & (1 << j)) != 0)
-                       z |= 1 << (7 - j);
-       }
-       return z;
-}
-
-
 void dsp_audio_generate_law_tables(void)
 {
        int i;
        for (i = 0; i < 256; i++)
-               dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
+               dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i));
 
        for (i = 0; i < 256; i++)
-               dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
+               dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i));
 
        for (i = 0; i < 256; i++) {
                dsp_audio_alaw_to_ulaw[i] =
@@ -176,13 +164,13 @@ dsp_audio_generate_s2law_table(void)
                /* generating ulaw-table */
                for (i = -32768; i < 32768; i++) {
                        dsp_audio_s16_to_law[i & 0xffff] =
-                               reverse_bits(linear2ulaw(i));
+                               bitrev8(linear2ulaw(i));
                }
        } else {
                /* generating alaw-table */
                for (i = -32768; i < 32768; i++) {
                        dsp_audio_s16_to_law[i & 0xffff] =
-                               reverse_bits(linear2alaw(i));
+                               bitrev8(linear2alaw(i));
                }
        }
 }
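
The deleted reverse_bits() and the kernel's bitrev8() agree on every 8-bit input, which is what makes this a pure cleanup. A quick standalone check of the equivalence (userspace sketch of the removed helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Open-coded byte reversal, identical in effect to the deleted
     * reverse_bits() and, for 8-bit values, to bitrev8(). */
    static uint8_t reverse8(uint8_t i)
    {
            uint8_t z = 0;
            int j;

            for (j = 0; j < 8; j++)
                    if (i & (1 << j))
                            z |= 1 << (7 - j);
            return z;
    }

    int main(void)
    {
            /* 0xb4 = 10110100b reverses to 00101101b = 0x2d */
            printf("%#x\n", reverse8(0xb4));
            return 0;
    }
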
index 1a57e88a38f7554ef3c09199090ce52575d041ef..cd35079c8c98b69469e1e64794b6a7345c382f3b 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
-#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 
index 32814371b8d304539a2eb1cfece077510b4ca394..aa1b41ca40f778dcb4e6c0e393ab4ee33d25d388 100644 (file)
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
index 48a4a826ae07649419d033b99c564b2adb9da6ea..200366c62231dd5f8f38f74284a42810c8603d19 100644 (file)
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
index 48dfe3c4d6aa7968bbc1986eafcb9e965fe55950..6ba47cfb1443748ccf092819a6a5ef160bb856fd 100644 (file)
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
                return r;
 
        disk_super = dm_block_data(copy);
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+       dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+       dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
        dm_sm_dec_block(pmd->metadata_sm, held_root);
 
        return dm_tm_unlock(pmd->tm, copy);
index ab37ae114e943c20c161f88b8c2a739206bfafab..0d7ab20c58dffc40d5c56c9427b7dd7f090c8bd3 100644 (file)
@@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors, max_size = 0;
+       sector_t max_sectors;
+       int max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
-                         (sector_t) queue_max_sectors(q));
+                         (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-       /*
-        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-        * to the targets' merge function since it holds sectors not bytes).
-        * Just doing this as an interim fix for stable@ because the more
-        * comprehensive cleanup of switching to sector_t will impact every
-        * DM target that implements a ->merge hook.
-        */
-       if (max_size > INT_MAX)
-               max_size = INT_MAX;
+       if (max_size < 0)
+               max_size = 0;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, max_size);
        /*
         * If the target doesn't support merge method and some of the devices
-        * provided their merge_bvec method (we know this by looking for the
-        * max_hw_sectors that dm_set_device_limits may set), then we can't
-        * allow bios with multiple vector entries.  So always set max_size
-        * to 0, and the code below allows just one page.
+        * provided their merge_bvec method (we know this by looking at
+        * queue_max_hw_sectors), then we can't allow bios with multiple vector
+        * entries.  So always set max_size to 0, and the code below allows
+        * just one page.
         */
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
                max_size = 0;
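
The type change is safe because bounding max_sectors by BIO_MAX_SECTORS keeps the shifted byte count far below INT_MAX, so the stop-gap INT_MAX clamp can go; the only hazard left is a small negative difference, clamped to zero. A sketch of the arithmetic (constants assumed: 512-byte sectors, 1 MiB bio limit with 4 KiB pages):

    #include <stdio.h>

    #define SECTOR_SHIFT    9                   /* 512-byte sectors */
    #define BIO_MAX_SECTORS 2048                /* 1 MiB with 4 KiB pages */

    /* Sketch of the clamped computation in dm_merge_bvec() above. */
    static int merge_budget(unsigned long long max_sectors, unsigned int bi_size)
    {
            long long bytes;

            if (max_sectors > BIO_MAX_SECTORS)
                    max_sectors = BIO_MAX_SECTORS;

            bytes = (long long)(max_sectors << SECTOR_SHIFT) - (long long)bi_size;
            return bytes < 0 ? 0 : (int)bytes;
    }

    int main(void)
    {
            printf("%d\n", merge_budget(1ULL << 40, 4096)); /* huge target */
            printf("%d\n", merge_budget(1, 4096));          /* clamped to 0 */
            return 0;
    }
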
index 0c2a4e8b873c659dbc260b2aa5484c7a5e87b176..e25f00f0138a7b4d82a5ae4f6fc7e1b6f0bb1b30 100644 (file)
@@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
        char *ptr;
        int err;
 
-       file = kmalloc(sizeof(*file), GFP_NOIO);
+       file = kzalloc(sizeof(*file), GFP_NOIO);
        if (!file)
                return -ENOMEM;
 
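
Switching to kzalloc() matters here because the buffer is later copied out to user space: any bytes not explicitly written would otherwise leak stale heap contents. A userspace analogue of the difference, with calloc() standing in for kzalloc() (struct and field names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct bitmap_file { char pathname[64]; };

    int main(void)
    {
            /* malloc() leaves the allocation uninitialized; if only part
             * of pathname is written, the tail is stale heap data.
             * calloc(), like kzalloc(), guarantees the padding that gets
             * copied out is zero. */
            struct bitmap_file *file = calloc(1, sizeof(*file));

            if (!file)
                    return 1;
            strcpy(file->pathname, "/bitmap");  /* rest of buffer stays 0 */
            printf("%s\n", file->pathname);
            free(file);
            return 0;
    }
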
index bf2b80d5c4707a64210b5e57deb785069dc7d921..8731b6ea026bd9b8cfbe2a21bbc06366e509181c 100644 (file)
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
 
 extern struct dm_block_validator btree_node_validator;
 
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+                          struct dm_btree_value_type *vt);
+
 #endif /* DM_BTREE_INTERNAL_H */
index 9836c0ae897c33c4e227bca77cc95026c193f73c..4222f774cf369b1eb1b031bd652854c573b224af 100644 (file)
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
        return r;
 }
 
-static struct dm_btree_value_type le64_type = {
-       .context = NULL,
-       .size = sizeof(__le64),
-       .inc = NULL,
-       .dec = NULL,
-       .equal = NULL
-};
-
 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, dm_block_t *new_root)
 {
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
+       struct dm_btree_value_type le64_vt;
 
+       init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                r = remove_raw(&spine, info,
                               (level == last_level ?
-                               &info->value_type : &le64_type),
+                               &info->value_type : &le64_vt),
                               root, keys[level], (unsigned *)&index);
                if (r < 0)
                        break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
+       struct dm_btree_value_type le64_vt;
        uint64_t k;
 
+       init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < last_level; level++) {
-               r = remove_raw(&spine, info, &le64_type,
+               r = remove_raw(&spine, info, &le64_vt,
                               root, keys[level], (unsigned *) &index);
                if (r < 0)
                        goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
                                             value_ptr(n, index));
 
                delete_at(n, index);
+               keys[last_level] = k + 1ull;
 
        } else
                r = -ENODATA;
index 1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff..0dee514ba4c5f9e8d34d16e9d239ef333395c7d4 100644 (file)
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
 {
        return s->root;
 }
+
+static void le64_inc(void *context, const void *value_le)
+{
+       struct dm_transaction_manager *tm = context;
+       __le64 v_le;
+
+       memcpy(&v_le, value_le, sizeof(v_le));
+       dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+       struct dm_transaction_manager *tm = context;
+       __le64 v_le;
+
+       memcpy(&v_le, value_le, sizeof(v_le));
+       dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+       __le64 v1_le, v2_le;
+
+       memcpy(&v1_le, value1_le, sizeof(v1_le));
+       memcpy(&v2_le, value2_le, sizeof(v2_le));
+       return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+                   struct dm_btree_value_type *vt)
+{
+       vt->context = tm;
+       vt->size = sizeof(__le64);
+       vt->inc = le64_inc;
+       vt->dec = le64_dec;
+       vt->equal = le64_equal;
+}
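
init_le64_type() turns the bare __le64 value used in upper btree levels into a reference-counted type: whenever the btree code copies or drops one of these values, the inc/dec callbacks adjust the reference count of the block it points to via the transaction manager. A compact userspace sketch of the same vtable pattern (the struct names and the fake transaction manager are invented for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch of a dm_btree_value_type-style vtable: callbacks keep
     * reference counts on the blocks a value refers to. */
    struct value_type {
            void *context;
            size_t size;
            void (*inc)(void *context, const void *value);
            void (*dec)(void *context, const void *value);
    };

    struct fake_tm { int refs[16]; };   /* stand-in transaction manager */

    static void u64_inc(void *ctx, const void *v)
    {
            uint64_t block;

            memcpy(&block, v, sizeof(block));
            ((struct fake_tm *)ctx)->refs[block]++;
    }

    static void u64_dec(void *ctx, const void *v)
    {
            uint64_t block;

            memcpy(&block, v, sizeof(block));
            ((struct fake_tm *)ctx)->refs[block]--;
    }

    int main(void)
    {
            struct fake_tm tm = { {0} };
            struct value_type vt = { &tm, sizeof(uint64_t), u64_inc, u64_dec };
            uint64_t root = 3;

            vt.inc(vt.context, &root);          /* node copy took a ref */
            printf("refs[3]=%d\n", tm.refs[3]); /* -> 1 */
            return 0;
    }
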
index fdd3793e22f957ef08db71f897607c68ce6eb6a3..c7726cebc4950c24cb6f6f2b7cacfa6465bddeb6 100644 (file)
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
        struct btree_node *n;
        struct dm_btree_value_type le64_type;
 
-       le64_type.context = NULL;
-       le64_type.size = sizeof(__le64);
-       le64_type.inc = NULL;
-       le64_type.dec = NULL;
-       le64_type.equal = NULL;
-
+       init_le64_type(info->tm, &le64_type);
        init_shadow_spine(&spine, info);
 
        for (level = 0; level < (info->levels - 1); level++) {
index 94f5b55069e09610f21ea640f5dff3efd7e580ca..967a4ed73929ff44a38d9475c5e362fc2914c758 100644 (file)
@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
+       unsigned long flags;
 
        /*
         * If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
                return;
        }
        set_bit(Blocked, &rdev->flags);
+       spin_lock_irqsave(&conf->device_lock, flags);
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
-               unsigned long flags;
-               spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                set_bit(Faulty, &rdev->flags);
-               spin_unlock_irqrestore(&conf->device_lock, flags);
        } else
                set_bit(Faulty, &rdev->flags);
+       spin_unlock_irqrestore(&conf->device_lock, flags);
        /*
         * if recovery is running, make sure it aborts.
         */
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
         * Find all failed disks within the RAID1 configuration
         * and mark them readable.
         * Called under mddev lock, so rcu protection not needed.
+        * device_lock used to avoid races with raid1_end_read_request
+        * which expects 'In_sync' flags and ->degraded to be consistent.
         */
+       spin_lock_irqsave(&conf->device_lock, flags);
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = conf->mirrors[i].rdev;
                struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
                        sysfs_notify_dirent_safe(rdev->sysfs_state);
                }
        }
-       spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded -= count;
        spin_unlock_irqrestore(&conf->device_lock, flags);
 
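
Both raid1 hunks widen device_lock so the In_sync test-and-clear and the ->degraded update happen in one critical section, which is exactly the consistency raid1_end_read_request relies on. A minimal pthread sketch of the invariant being protected (names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Two fields that must change together: a per-device in_sync flag
     * and a global degraded counter.  Widening the lock, as the raid1
     * fix does, means no reader can observe in_sync already cleared
     * while degraded has not yet been incremented. */
    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool in_sync = true;
    static int degraded;

    static void mark_faulty(void)
    {
            pthread_mutex_lock(&device_lock);
            if (in_sync) {
                    in_sync = false;
                    degraded++;     /* same critical section */
            }
            pthread_mutex_unlock(&device_lock);
    }

    int main(void)
    {
            mark_faulty();
            printf("degraded=%d in_sync=%d\n", degraded, in_sync);
            return 0;
    }
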
index 643d217bfa13ac8caa3dee9f9dd65d57f165bbe8..f757023fc4580680bfdd6e178f93acb62cb1f31e 100644 (file)
@@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 static int drop_one_stripe(struct r5conf *conf)
 {
        struct stripe_head *sh;
-       int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+       int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
 
        spin_lock_irq(conf->hash_locks + hash);
        sh = get_free_stripe(conf, hash);
@@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 
        if (mutex_trylock(&conf->cache_size_mutex)) {
                ret= 0;
-               while (ret < sc->nr_to_scan) {
+               while (ret < sc->nr_to_scan &&
+                      conf->max_nr_stripes > conf->min_nr_stripes) {
                        if (drop_one_stripe(conf) == 0) {
                                ret = SHRINK_STOP;
                                break;
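
Besides being cheaper, the mask sidesteps C's signed-modulo pitfall: if max_nr_stripes reaches 0, (0 - 1) % N is -1 (the sign follows the dividend), which would index before the hash_locks array, while (0 - 1) & (N - 1) yields N - 1. A two-line demonstration, assuming the lock count is a power of two as NR_STRIPE_HASH_LOCKS is:

    #include <stdio.h>

    #define NR_LOCKS 8      /* power of two, like NR_STRIPE_HASH_LOCKS */

    int main(void)
    {
            int n = 0;      /* max_nr_stripes hitting zero */

            printf("%d\n", (n - 1) % NR_LOCKS);         /* -1: invalid index */
            printf("%d\n", (n - 1) & (NR_LOCKS - 1));   /*  7: always valid */
            return 0;
    }
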
index 3a27a84ad3ec376a2543c1ac9568c30e5d7c131b..9426276dbe1402b1445dd7b84da6d7fca38893a6 100644 (file)
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
        gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
        gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
        gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
        gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
index 653815950aa2416b277718df69213545573aa557..3f68dd251ce89304bf044960568c58c11aca8fdd 100644 (file)
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
 
 config MFD_CROS_EC_SPI
        tristate "ChromeOS Embedded Controller (SPI)"
-       depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+       depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
 
        ---help---
          If you say Y here, you get support for talking to the ChromeOS EC
index bebf58a06a6b2932d57798c0b5b81a31a12e9b7a..a72ddb2950784cf044fbfb5156ebd68866bbea48 100644 (file)
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
 
                arizona->has_fully_powered_off = true;
 
-               disable_irq(arizona->irq);
+               disable_irq_nosync(arizona->irq);
                arizona_enable_reset(arizona);
                regulator_bulk_disable(arizona->num_core_supplies,
                                       arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
                             arizona->pdata.gpio_defaults[i]);
        }
 
-       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
-       pm_runtime_use_autosuspend(arizona->dev);
-       pm_runtime_enable(arizona->dev);
-
        /* Chip default */
        if (!arizona->pdata.clk32k_src)
                arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
                                           arizona->pdata.spk_fmt[i]);
        }
 
+       pm_runtime_set_active(arizona->dev);
+       pm_runtime_enable(arizona->dev);
+
        /* Set up for interrupts */
        ret = arizona_irq_init(arizona);
        if (ret != 0)
                goto err_reset;
 
+       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+       pm_runtime_use_autosuspend(arizona->dev);
+
        arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
                            arizona_clkgen_err, arizona);
        arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
                goto err_irq;
        }
 
-#ifdef CONFIG_PM
-       regulator_disable(arizona->dcvdd);
-#endif
-
        return 0;
 
 err_irq:
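
The reordering follows the usual probe-time runtime-PM sequence: tell the core the device is already active, enable runtime PM before any IRQ handler can run and take PM references, and opt into autosuspend only once the interrupt plumbing exists. The shape of that sequence, isolated (a sketch against the runtime-PM API; error handling and teardown elided):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Probe-time runtime PM ordering mirrored from the hunks above
     * (sketch only; real code must handle errors and unwind). */
    static int example_dev_init(struct device *dev)
    {
            pm_runtime_set_active(dev);     /* hardware is up right now */
            pm_runtime_enable(dev);         /* core may suspend us from here */

            /* ... request IRQs; handlers may now take PM references ... */

            pm_runtime_set_autosuspend_delay(dev, 100);     /* ms */
            pm_runtime_use_autosuspend(dev);
            return 0;
    }
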
index 2d3db81be0990a1b88109aa7614f74f201930204..6ded3dc36644a31a0bd4775f36df72f1c00a7e0d 100644 (file)
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
 {
        struct at24_data *at24;
 
-       if (unlikely(off >= attr->size))
-               return -EFBIG;
-
        at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
        return at24_write(at24, buf, off, count);
 }
index c18f9e62a9fa2ea181c7ab2c75faaa9bc713b2ec..f50373645ab4ade89923ae8a7f4d5d35946a8f99 100644 (file)
@@ -282,7 +282,6 @@ config VETH
 config VIRTIO_NET
        tristate "Virtio network driver"
        depends on VIRTIO
-       select AVERAGE
        ---help---
          This is the virtual network driver for virtio.  It can be used with
          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
@@ -297,6 +296,13 @@ config NLMON
          diagnostics, etc. This is mostly intended for developers or support
          to debug netlink issues. If unsure, say N.
 
+config NET_VRF
+       tristate "Virtual Routing and Forwarding (Lite)"
+       depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+       ---help---
+         This option enables support for mapping interfaces into VRFs. This
+         in turn enables the creation of VRF devices.
+
 endif # NET_CORE
 
 config SUNGEM_PHY
index c12cb22478a7daa39e2935f1c0aa22c26ab04bdc..ca16dd689b36ba7836b71a8a24c9b81913b5ca67 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
 obj-$(CONFIG_GENEVE) += geneve.o
 obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_NET_VRF) += vrf.o
 
 #
 # Networking Drivers
index 0c627b4733ca56b026e15bf9527330539fd64cd1..0ef2ed3a610ec9d1fae6641ab74dd219a29692e9 100644 (file)
@@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
                   slave ? slave->dev->name : "NULL");
 
        if (!slave || !bond->send_peer_notif ||
+           !netif_carrier_ok(bond->dev) ||
            test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
                return false;
 
@@ -4119,9 +4120,8 @@ void bond_setup(struct net_device *bond_dev)
        SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
 
        /* Initialize the device options */
-       bond_dev->tx_queue_len = 0;
        bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
-       bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
+       bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
        bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
        /* don't acquire bond device's netif_tx_lock when transmitting */
index b3b922adc0e4f68ed15ff34537c21e1bd7e5e81f..615c65da39bedb648817a99002235ffbae2e6c46 100644 (file)
@@ -1120,7 +1120,7 @@ static void cfhsi_setup(struct net_device *dev)
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->destructor = free_netdev;
        dev->netdev_ops = &cfhsi_netdevops;
        for (i = 0; i < CFHSI_PRIO_LAST; ++i)
index 9da06537237ff220a16b3c5831c728d809d91bb1..c2dea4916e5d720bb29814153f302ec364fe4f61 100644 (file)
@@ -427,7 +427,7 @@ static void caifdev_setup(struct net_device *dev)
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CAIF_MAX_MTU;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->destructor = free_netdev;
        skb_queue_head_init(&serdev->head);
        serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
index 72ea9ff9bb9c02ae16133de4b12f83e70ec97c0d..de3962014af70c8a979f4cb63b063583ba4927a9 100644 (file)
@@ -710,7 +710,7 @@ static void cfspi_setup(struct net_device *dev)
        dev->netdev_ops = &cfspi_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->mtu = SPI_MAX_PAYLOAD_SIZE;
        dev->destructor = free_netdev;
        skb_queue_head_init(&cfspi->qhead);
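
All three caif drivers (and bonding above) apply the same conversion: instead of zeroing tx_queue_len to bypass the qdisc, the setup callback sets the IFF_NO_QUEUE private flag, leaving the queue length at a sane default. The pattern, isolated (kernel-style sketch using the flag and helpers from the hunks above):

    #include <linux/netdevice.h>

    /* Setup callback for a virtual device that needs no TX queue:
     * IFF_NO_QUEUE replaces the old tx_queue_len = 0 idiom. */
    static void example_setup(struct net_device *dev)
    {
            dev->flags = IFF_POINTOPOINT | IFF_NOARP;
            dev->priv_flags |= IFF_NO_QUEUE;    /* qdisc bypass */
            dev->destructor = free_netdev;
    }
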
index b1e8d729851cbb5173c1bbec2b34c4893b2ff595..c83f0f03482ba1d2d9f2b121d2c844af29432f3c 100644 (file)
@@ -805,7 +805,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                reg |= FLEXCAN_CTRL_SMP;
 
-       netdev_info(dev, "writing ctrl=0x%08x\n", reg);
+       netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
        flexcan_write(reg, &regs->ctrl);
 
        /* print chip status */
index 8b4d3e6875eb17e6bca38c812132953d0c5ce2c2..5eee62badf45457798c2fabe5005faa26c6286e2 100644 (file)
@@ -162,7 +162,7 @@ struct gs_can {
        struct can_bittiming_const bt_const;
        unsigned int channel;   /* channel number */
 
-       /* This lock prevents a race condition between xmit and recieve. */
+       /* This lock prevents a race condition between xmit and receive. */
        spinlock_t tx_ctx_lock;
        struct gs_tx_context tx_context[GS_MAX_TX_URBS];
 
@@ -274,7 +274,7 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
        }
 }
 
-static void gs_usb_recieve_bulk_callback(struct urb *urb)
+static void gs_usb_receive_bulk_callback(struct urb *urb)
 {
        struct gs_usb *usbcan = urb->context;
        struct gs_can *dev;
@@ -376,7 +376,7 @@ static void gs_usb_recieve_bulk_callback(struct urb *urb)
                          usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
                          hf,
                          sizeof(struct gs_host_frame),
-                         gs_usb_recieve_bulk_callback,
+                         gs_usb_receive_bulk_callback,
                          usbcan
                          );
 
@@ -605,7 +605,7 @@ static int gs_can_open(struct net_device *netdev)
                                                          GSUSB_ENDPOINT_IN),
                                          buf,
                                          sizeof(struct gs_host_frame),
-                                         gs_usb_recieve_bulk_callback,
+                                         gs_usb_receive_bulk_callback,
                                          parent);
                        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
index 1c7808495a9dd8198936e6db4ce23fd7f2d29ed3..735f04cd83eec7e1b629695133c5806bcb3fff08 100644 (file)
@@ -116,9 +116,9 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
        .port_join_bridge       = mv88e6xxx_join_bridge,
        .port_leave_bridge      = mv88e6xxx_leave_bridge,
        .port_stp_update        = mv88e6xxx_port_stp_update,
-       .fdb_add                = mv88e6xxx_port_fdb_add,
-       .fdb_del                = mv88e6xxx_port_fdb_del,
-       .fdb_getnext            = mv88e6xxx_port_fdb_getnext,
+       .port_fdb_add           = mv88e6xxx_port_fdb_add,
+       .port_fdb_del           = mv88e6xxx_port_fdb_del,
+       .port_fdb_getnext       = mv88e6xxx_port_fdb_getnext,
 };
 
 MODULE_ALIAS("platform:mv88e6171");
index af210efecc554546a762073eb0121823b9337dcb..14b71779df99d76988127ae77958d99796396a55 100644 (file)
@@ -123,8 +123,9 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
 
        mutex_lock(&ps->eeprom_mutex);
 
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
-                                 0xc000 | (addr & 0xff));
+       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                                 GLOBAL2_EEPROM_OP_READ |
+                                 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
        if (ret < 0)
                goto error;
 
@@ -132,7 +133,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
        if (ret < 0)
                goto error;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
 error:
        mutex_unlock(&ps->eeprom_mutex);
        return ret;
@@ -205,11 +206,11 @@ static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
 {
        int ret;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
        if (ret < 0)
                return ret;
 
-       if (!(ret & 0x0400))
+       if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
                return -EROFS;
 
        return 0;
@@ -223,12 +224,13 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
 
        mutex_lock(&ps->eeprom_mutex);
 
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
        if (ret < 0)
                goto error;
 
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
-                                 0xb000 | (addr & 0xff));
+       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+                                 GLOBAL2_EEPROM_OP_WRITE |
+                                 (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
        if (ret < 0)
                goto error;
 
@@ -341,9 +343,14 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .port_join_bridge       = mv88e6xxx_join_bridge,
        .port_leave_bridge      = mv88e6xxx_leave_bridge,
        .port_stp_update        = mv88e6xxx_port_stp_update,
-       .fdb_add                = mv88e6xxx_port_fdb_add,
-       .fdb_del                = mv88e6xxx_port_fdb_del,
-       .fdb_getnext            = mv88e6xxx_port_fdb_getnext,
+       .port_pvid_get          = mv88e6xxx_port_pvid_get,
+       .port_pvid_set          = mv88e6xxx_port_pvid_set,
+       .port_vlan_add          = mv88e6xxx_port_vlan_add,
+       .port_vlan_del          = mv88e6xxx_port_vlan_del,
+       .vlan_getnext           = mv88e6xxx_vlan_getnext,
+       .port_fdb_add           = mv88e6xxx_port_fdb_add,
+       .port_fdb_del           = mv88e6xxx_port_fdb_del,
+       .port_fdb_getnext       = mv88e6xxx_port_fdb_getnext,
 };
 
 MODULE_ALIAS("platform:mv88e6172");
index 109452056eff9fc68074bb82b9ccdf8f06adf7de..3774f53d28d781aaec7f5150352a944ba505d974 100644 (file)
@@ -2,6 +2,9 @@
  * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
  * Copyright (c) 2008 Marvell Semiconductor
  *
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ *     Added support for VLAN Table Unit operations
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -964,7 +967,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
 {
        int ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
        if (ret < 0)
                return ret;
 
@@ -1091,7 +1094,7 @@ int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
        ps->bridge_mask[fid] = br_port_mask;
 
        if (fid != ps->fid[port]) {
-               ps->fid_mask |= 1 << ps->fid[port];
+               clear_bit(ps->fid[port], ps->fid_bitmap);
                ps->fid[port] = fid;
                ret = _mv88e6xxx_update_bridge_config(ds, fid);
        }
@@ -1125,9 +1128,16 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
 
        mutex_lock(&ps->smi_mutex);
 
-       newfid = __ffs(ps->fid_mask);
+       newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
+       if (unlikely(newfid > ps->num_ports)) {
+               netdev_err(ds->ports[port], "the first %d FIDs are all in use\n",
+                          ps->num_ports);
+               ret = -ENOSPC;
+               goto unlock;
+       }
+
        ps->fid[port] = newfid;
-       ps->fid_mask &= ~(1 << newfid);
+       set_bit(newfid, ps->fid_bitmap);
        ps->bridge_mask[fid] &= ~(1 << port);
        ps->bridge_mask[newfid] = 1 << port;
 
@@ -1135,6 +1145,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
        if (!ret)
                ret = _mv88e6xxx_update_bridge_config(ds, newfid);
 
+unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -1174,8 +1185,476 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
        return 0;
 }
 
-static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
-                                 const unsigned char *addr)
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+{
+       int ret;
+
+       ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+       if (ret < 0)
+               return ret;
+
+       *pvid = ret & PORT_DEFAULT_VLAN_MASK;
+
+       return 0;
+}
+
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+{
+       return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+                                  pvid & PORT_DEFAULT_VLAN_MASK);
+}
+
+static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+{
+       return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+                              GLOBAL_VTU_OP_BUSY);
+}
+
+static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+{
+       int ret;
+
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_vtu_wait(ds);
+}
+
+static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+{
+       int ret;
+
+       ret = _mv88e6xxx_vtu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+}
+
+static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+                                       struct mv88e6xxx_vtu_stu_entry *entry,
+                                       unsigned int nibble_offset)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u16 regs[3];
+       int i;
+       int ret;
+
+       for (i = 0; i < 3; ++i) {
+               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+                                         GLOBAL_VTU_DATA_0_3 + i);
+               if (ret < 0)
+                       return ret;
+
+               regs[i] = ret;
+       }
+
+       for (i = 0; i < ps->num_ports; ++i) {
+               unsigned int shift = (i % 4) * 4 + nibble_offset;
+               u16 reg = regs[i / 4];
+
+               entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
+       }
+
+       return 0;
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+                                        struct mv88e6xxx_vtu_stu_entry *entry,
+                                        unsigned int nibble_offset)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u16 regs[3] = { 0 };
+       int i;
+       int ret;
+
+       for (i = 0; i < ps->num_ports; ++i) {
+               unsigned int shift = (i % 4) * 4 + nibble_offset;
+               u8 data = entry->data[i];
+
+               regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
+       }
+
+       for (i = 0; i < 3; ++i) {
+               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+                                          GLOBAL_VTU_DATA_0_3 + i, regs[i]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+                                 struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       struct mv88e6xxx_vtu_stu_entry next = { 0 };
+       int ret;
+
+       ret = _mv88e6xxx_vtu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+                                  vid & GLOBAL_VTU_VID_MASK);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+       if (ret < 0)
+               return ret;
+
+       next.vid = ret & GLOBAL_VTU_VID_MASK;
+       next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+       if (next.valid) {
+               ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+               if (ret < 0)
+                       return ret;
+
+               if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+                   mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+                       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+                                                 GLOBAL_VTU_FID);
+                       if (ret < 0)
+                               return ret;
+
+                       next.fid = ret & GLOBAL_VTU_FID_MASK;
+
+                       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+                                                 GLOBAL_VTU_SID);
+                       if (ret < 0)
+                               return ret;
+
+                       next.sid = ret & GLOBAL_VTU_SID_MASK;
+               }
+       }
+
+       *entry = next;
+       return 0;
+}
+
+static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+                                   struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       u16 reg = 0;
+       int ret;
+
+       ret = _mv88e6xxx_vtu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       if (!entry->valid)
+               goto loadpurge;
+
+       /* Write port member tags */
+       ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+       if (ret < 0)
+               return ret;
+
+       if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+           mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+               reg = entry->sid & GLOBAL_VTU_SID_MASK;
+               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+               if (ret < 0)
+                       return ret;
+
+               reg = entry->fid & GLOBAL_VTU_FID_MASK;
+               ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+               if (ret < 0)
+                       return ret;
+       }
+
+       reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+       reg |= entry->vid & GLOBAL_VTU_VID_MASK;
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+                                 struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       struct mv88e6xxx_vtu_stu_entry next = { 0 };
+       int ret;
+
+       ret = _mv88e6xxx_vtu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+                                  sid & GLOBAL_VTU_SID_MASK);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+       if (ret < 0)
+               return ret;
+
+       next.sid = ret & GLOBAL_VTU_SID_MASK;
+
+       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+       if (ret < 0)
+               return ret;
+
+       next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+       if (next.valid) {
+               ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+               if (ret < 0)
+                       return ret;
+       }
+
+       *entry = next;
+       return 0;
+}
+
+static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+                                   struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       u16 reg = 0;
+       int ret;
+
+       ret = _mv88e6xxx_vtu_wait(ds);
+       if (ret < 0)
+               return ret;
+
+       if (!entry->valid)
+               goto loadpurge;
+
+       /* Write port states */
+       ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+       if (ret < 0)
+               return ret;
+
+       reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+       if (ret < 0)
+               return ret;
+
+       reg = entry->sid & GLOBAL_VTU_SID_MASK;
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
+                               struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct mv88e6xxx_vtu_stu_entry vlan = {
+               .valid = true,
+               .vid = vid,
+       };
+       int i;
+
+       /* exclude all ports except the CPU */
+       for (i = 0; i < ps->num_ports; ++i)
+               vlan.data[i] = dsa_is_cpu_port(ds, i) ?
+                       GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
+                       GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+       if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+           mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+               struct mv88e6xxx_vtu_stu_entry vstp;
+               int err;
+
+               /* Adding a VTU entry requires a valid STU entry. As VSTP is not
+                * implemented, only one STU entry is needed to cover all VTU
+                * entries. Thus, validate SID 0.
+                */
+               vlan.sid = 0;
+               err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+               if (err)
+                       return err;
+
+               if (vstp.sid != vlan.sid || !vstp.valid) {
+                       memset(&vstp, 0, sizeof(vstp));
+                       vstp.valid = true;
+                       vstp.sid = vlan.sid;
+
+                       err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+                       if (err)
+                               return err;
+               }
+
+               /* Non-bridged ports and bridge groups use FIDs from 1 to
+                * num_ports; VLANs use FIDs from num_ports+1 to 4095.
+                */
+               vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
+                                             ps->num_ports + 1);
+               if (unlikely(vlan.fid == VLAN_N_VID)) {
+                       pr_err("no more FIDs available for VLAN %d\n", vid);
+                       return -ENOSPC;
+               }
+
+               err = _mv88e6xxx_flush_fid(ds, vlan.fid);
+               if (err)
+                       return err;
+
+               set_bit(vlan.fid, ps->fid_bitmap);
+       }
+
+       *entry = vlan;
+       return 0;
+}
+
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+                           bool untagged)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct mv88e6xxx_vtu_stu_entry vlan;
+       int err;
+
+       mutex_lock(&ps->smi_mutex);
+       err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+       if (err)
+               goto unlock;
+
+       if (vlan.vid != vid || !vlan.valid) {
+               err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
+               if (err)
+                       goto unlock;
+       }
+
+       vlan.data[port] = untagged ?
+               GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
+               GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+
+       err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+unlock:
+       mutex_unlock(&ps->smi_mutex);
+
+       return err;
+}
+
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct mv88e6xxx_vtu_stu_entry vlan;
+       bool keep = false;
+       int i, err;
+
+       mutex_lock(&ps->smi_mutex);
+
+       err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+       if (err)
+               goto unlock;
+
+       if (vlan.vid != vid || !vlan.valid ||
+           vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+               err = -ENOENT;
+               goto unlock;
+       }
+
+       vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+       /* keep the VLAN unless all ports are excluded */
+       for (i = 0; i < ps->num_ports; ++i) {
+               if (dsa_is_cpu_port(ds, i))
+                       continue;
+
+               if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+                       keep = true;
+                       break;
+               }
+       }
+
+       vlan.valid = keep;
+       err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+       if (err)
+               goto unlock;
+
+       if (!keep)
+               clear_bit(vlan.fid, ps->fid_bitmap);
+
+unlock:
+       mutex_unlock(&ps->smi_mutex);
+
+       return err;
+}
+
+static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
+                                      struct mv88e6xxx_vtu_stu_entry *entry)
+{
+       int err;
+
+       do {
+               if (vid == 4095)
+                       return -ENOENT;
+
+               err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
+               if (err)
+                       return err;
+
+               if (!entry->valid)
+                       return -ENOENT;
+
+               vid = entry->vid;
+       } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
+                entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
+
+       return 0;
+}
+
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+                          unsigned long *ports, unsigned long *untagged)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct mv88e6xxx_vtu_stu_entry next;
+       int port;
+       int err;
+
+       if (*vid == 4095)
+               return -ENOENT;
+
+       mutex_lock(&ps->smi_mutex);
+       err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+       mutex_unlock(&ps->smi_mutex);
+
+       if (err)
+               return err;
+
+       if (!next.valid)
+               return -ENOENT;
+
+       *vid = next.vid;
+
+       for (port = 0; port < ps->num_ports; ++port) {
+               clear_bit(port, ports);
+               clear_bit(port, untagged);
+
+               if (dsa_is_cpu_port(ds, port))
+                       continue;
+
+               if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
+                   next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+                       set_bit(port, ports);
+
+               if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+                       set_bit(port, untagged);
+       }
+
+       return 0;
+}
+
+static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+                                   const unsigned char *addr)
 {
        int i, ret;
 
@@ -1190,7 +1669,7 @@ static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
        return 0;
 }
 
-static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
 {
        int i, ret;
 
@@ -1206,29 +1685,83 @@ static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
        return 0;
 }
 
-static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
-                                   const unsigned char *addr, int state)
+static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+                              struct mv88e6xxx_atu_entry *entry)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u8 fid = ps->fid[port];
+       u16 reg = 0;
        int ret;
 
        ret = _mv88e6xxx_atu_wait(ds);
        if (ret < 0)
                return ret;
 
-       ret = __mv88e6xxx_write_addr(ds, addr);
+       ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
        if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
-                                  (0x10 << port) | state);
-       if (ret)
+       if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+               unsigned int mask, shift;
+
+               if (entry->trunk) {
+                       reg |= GLOBAL_ATU_DATA_TRUNK;
+                       mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+                       shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+               } else {
+                       mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+                       shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+               }
+
+               reg |= (entry->portv_trunkid << shift) & mask;
+       }
+
+       reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
+
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
+       if (ret < 0)
                return ret;
 
-       ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+       return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+}
 
-       return ret;
+static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct mv88e6xxx_vtu_stu_entry vlan;
+       int err;
+
+       if (vid == 0)
+               return ps->fid[port];
+
+       err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
+       if (err)
+               return err;
+
+       if (vlan.vid == vid)
+               return vlan.fid;
+
+       return -ENOENT;
+}
+
+static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+                                   const unsigned char *addr, u16 vid,
+                                   u8 state)
+{
+       struct mv88e6xxx_atu_entry entry = { 0 };
+       int ret;
+
+       ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
+       if (ret < 0)
+               return ret;
+
+       entry.fid = ret;
+       entry.state = state;
+       ether_addr_copy(entry.mac, addr);
+       if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+               entry.trunk = false;
+               entry.portv_trunkid = BIT(port);
+       }
+
+       return _mv88e6xxx_atu_load(ds, &entry);
 }
 
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
@@ -1241,7 +1774,7 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+       ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -1254,61 +1787,105 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+       ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
                                       GLOBAL_ATU_DATA_STATE_UNUSED);
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
 
-static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
-                                   unsigned char *addr, bool *is_static)
+static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+                                 const unsigned char *addr,
+                                 struct mv88e6xxx_atu_entry *entry)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u8 fid = ps->fid[port];
-       int ret, state;
+       struct mv88e6xxx_atu_entry next = { 0 };
+       int ret;
+
+       next.fid = fid;
 
        ret = _mv88e6xxx_atu_wait(ds);
        if (ret < 0)
                return ret;
 
-       ret = __mv88e6xxx_write_addr(ds, addr);
+       ret = _mv88e6xxx_atu_mac_write(ds, addr);
        if (ret < 0)
                return ret;
 
-       do {
-               ret = _mv88e6xxx_atu_cmd(ds, fid,  GLOBAL_ATU_OP_GET_NEXT_DB);
-               if (ret < 0)
-                       return ret;
+       ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+       if (ret < 0)
+               return ret;
 
-               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
-               if (ret < 0)
-                       return ret;
-               state = ret & GLOBAL_ATU_DATA_STATE_MASK;
-               if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
-                       return -ENOENT;
-       } while (!(((ret >> 4) & 0xff) & (1 << port)));
+       ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
+       if (ret < 0)
+               return ret;
 
-       ret = __mv88e6xxx_read_addr(ds, addr);
+       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
        if (ret < 0)
                return ret;
 
-       *is_static = state == (is_multicast_ether_addr(addr) ?
-                              GLOBAL_ATU_DATA_STATE_MC_STATIC :
-                              GLOBAL_ATU_DATA_STATE_UC_STATIC);
+       next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+       if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+               unsigned int mask, shift;
+
+               if (ret & GLOBAL_ATU_DATA_TRUNK) {
+                       next.trunk = true;
+                       mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+                       shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+               } else {
+                       next.trunk = false;
+                       mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+                       shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+               }
+
+               next.portv_trunkid = (ret & mask) >> shift;
+       }
 
+       *entry = next;
        return 0;
 }
 
 /* get next entry for port */
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-                              unsigned char *addr, bool *is_static)
+                              unsigned char *addr, u16 *vid, bool *is_static)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       struct mv88e6xxx_atu_entry next;
+       u16 fid;
        int ret;
 
        mutex_lock(&ps->smi_mutex);
-       ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+
+       ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
+       if (ret < 0)
+               goto unlock;
+       fid = ret;
+
+       do {
+               if (is_broadcast_ether_addr(addr)) {
+                       struct mv88e6xxx_vtu_stu_entry vtu;
+
+                       ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
+                       if (ret < 0)
+                               goto unlock;
+
+                       *vid = vtu.vid;
+                       fid = vtu.fid;
+               }
+
+               ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
+               if (ret < 0)
+                       goto unlock;
+
+               ether_addr_copy(addr, next.mac);
+
+               if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+                       continue;
+       } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+
+       *is_static = next.state == (is_multicast_ether_addr(addr) ?
+                                   GLOBAL_ATU_DATA_STATE_MC_STATIC :
+                                   GLOBAL_ATU_DATA_STATE_UC_STATIC);
+unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
@@ -1349,8 +1926,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                 * full duplex.
                 */
                reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
-               if (dsa_is_cpu_port(ds, port) ||
-                   ds->dsa_port_mask & (1 << port)) {
+               if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
                        reg |= PORT_PCS_CTRL_FORCE_LINK |
                                PORT_PCS_CTRL_LINK_UP |
                                PORT_PCS_CTRL_DUPLEX_FULL |
@@ -1411,12 +1987,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                                reg |= PORT_CONTROL_EGRESS_ADD_TAG;
                }
        }
-       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-           mv88e6xxx_6320_family(ds)) {
-               if (ds->dsa_port_mask & (1 << port))
+       if (dsa_is_dsa_port(ds, port)) {
+               if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+                       reg |= PORT_CONTROL_DSA_TAG;
+               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+                   mv88e6xxx_6320_family(ds)) {
                        reg |= PORT_CONTROL_FRAME_MODE_DSA;
+               }
+
                if (port == dsa_upstream_port(ds))
                        reg |= PORT_CONTROL_FORWARD_UNKNOWN |
                                PORT_CONTROL_FORWARD_UNKNOWN_MC;
@@ -1428,13 +2007,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                        goto abort;
        }
 
-       /* Port Control 2: don't force a good FCS, set the maximum
-        * frame size to 10240 bytes, don't let the switch add or
-        * strip 802.1q tags, don't discard tagged or untagged frames
-        * on this port, do a destination address lookup on all
-        * received packets as usual, disable ARP mirroring and don't
-        * send a copy of all transmitted/received frames on this port
-        * to the CPU.
+       /* Port Control 2: don't force a good FCS, set the maximum frame size to
+        * 10240 bytes, enable 802.1q tags in fallback mode, don't discard
+        * tagged or untagged frames on this port, do a destination address
+        * lookup on all received packets as usual, disable ARP mirroring and
+        * don't send a copy of all transmitted/received frames on this port
+        * to the CPU.
         */
        reg = 0;
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
@@ -1456,6 +2033,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                        reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
        }
 
+       reg |= PORT_CONTROL_2_8021Q_FALLBACK;
+
        if (reg) {
                ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
                                           PORT_CONTROL_2, reg);
@@ -1552,9 +2131,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * ports, and allow each of the 'real' ports to only talk to
         * the upstream port.
         */
-       fid = __ffs(ps->fid_mask);
+       fid = port + 1;
        ps->fid[port] = fid;
-       ps->fid_mask &= ~(1 << fid);
+       set_bit(fid, ps->fid_bitmap);
 
        if (!dsa_is_cpu_port(ds, port))
                ps->bridge_mask[fid] = 1 << port;
@@ -1651,7 +2230,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
        unsigned char addr[6];
        int ret, data, state;
 
-       ret = __mv88e6xxx_write_addr(ds, bcast);
+       ret = _mv88e6xxx_atu_mac_write(ds, bcast);
        if (ret < 0)
                return ret;
 
@@ -1666,7 +2245,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
                state = data & GLOBAL_ATU_DATA_STATE_MASK;
                if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
                        break;
-               ret = __mv88e6xxx_read_addr(ds, addr);
+               ret = _mv88e6xxx_atu_mac_read(ds, addr);
                if (ret < 0)
                        return ret;
                mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
@@ -1853,8 +2432,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
 
        ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
 
-       ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
-
        INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
        name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
@@ -1982,6 +2559,12 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
        /* Wait for the flush to complete. */
        mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_stats_wait(ds);
+       if (ret < 0)
+               goto unlock;
+
+       /* Clear all the VTU and STU entries */
+       ret = _mv88e6xxx_vtu_stu_flush(ds);
+unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return ret;
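
The hunks above retire the driver's 32-bit fid_mask in favour of a bitmap sized for all 4095 FIDs (declared in the header changes below) and give each port a fixed default FID of port + 1. As a rough standalone sketch of that bookkeeping style, with plain C stand-ins for the kernel's set_bit()/test_bit() helpers and none of the driver's locking:

#include <limits.h>
#include <stdio.h>

#define VLAN_N_VID	4096
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS	((VLAN_N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long fid_bitmap[BITMAP_LONGS];

static void set_fid(unsigned int fid)
{
	fid_bitmap[fid / BITS_PER_LONG] |= 1UL << (fid % BITS_PER_LONG);
}

static int test_fid(unsigned int fid)
{
	return !!(fid_bitmap[fid / BITS_PER_LONG] &
		  (1UL << (fid % BITS_PER_LONG)));
}

/* Find the lowest free FID; FID 0 stays reserved, matching the
 * "FIDs 1 to 4095 available" comment in the header hunk below.
 */
static int alloc_fid(void)
{
	unsigned int fid;

	for (fid = 1; fid < VLAN_N_VID; fid++) {
		if (!test_fid(fid)) {
			set_fid(fid);
			return fid;
		}
	}
	return -1;	/* exhausted */
}

int main(void)
{
	int port;

	/* per-port default FIDs, mirroring fid = port + 1 above */
	for (port = 0; port < 7; port++)
		set_fid(port + 1);
	printf("first free FID: %d\n", alloc_fid());	/* prints 8 */
	return 0;
}
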
index 78e37226a37d2d90fd8c8924d484c057a7700d4a..72ca887feb0d56bafb47334b28eafe660efc418d 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef __MV88E6XXX_H
 #define __MV88E6XXX_H
 
+#include <linux/if_vlan.h>
+
 #ifndef UINT64_MAX
 #define UINT64_MAX             (u64)(~((u64)0))
 #endif
 #define PORT_CONTROL_1         0x05
 #define PORT_BASE_VLAN         0x06
 #define PORT_DEFAULT_VLAN      0x07
+#define PORT_DEFAULT_VLAN_MASK 0xfff
 #define PORT_CONTROL_2         0x08
 #define PORT_CONTROL_2_IGNORE_FCS      BIT(15)
 #define PORT_CONTROL_2_VTU_PRI_OVERRIDE        BIT(14)
 #define PORT_CONTROL_2_JUMBO_1522      (0x00 << 12)
 #define PORT_CONTROL_2_JUMBO_2048      (0x01 << 12)
 #define PORT_CONTROL_2_JUMBO_10240     (0x02 << 12)
+#define PORT_CONTROL_2_8021Q_MASK      (0x03 << 10)
+#define PORT_CONTROL_2_8021Q_DISABLED  (0x00 << 10)
+#define PORT_CONTROL_2_8021Q_FALLBACK  (0x01 << 10)
+#define PORT_CONTROL_2_8021Q_CHECK     (0x02 << 10)
+#define PORT_CONTROL_2_8021Q_SECURE    (0x03 << 10)
 #define PORT_CONTROL_2_DISCARD_TAGGED  BIT(9)
 #define PORT_CONTROL_2_DISCARD_UNTAGGED        BIT(8)
 #define PORT_CONTROL_2_MAP_DA          BIT(7)
 #define GLOBAL_MAC_01          0x01
 #define GLOBAL_MAC_23          0x02
 #define GLOBAL_MAC_45          0x03
+#define GLOBAL_ATU_FID         0x01    /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID         0x02    /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID_MASK    0xfff
+#define GLOBAL_VTU_SID         0x03    /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_SID_MASK    0x3f
 #define GLOBAL_CONTROL         0x04
 #define GLOBAL_CONTROL_SW_RESET                BIT(15)
 #define GLOBAL_CONTROL_PPU_ENABLE      BIT(14)
 #define GLOBAL_CONTROL_TCAM_EN         BIT(1)
 #define GLOBAL_CONTROL_EEPROM_DONE_EN  BIT(0)
 #define GLOBAL_VTU_OP          0x05
+#define GLOBAL_VTU_OP_BUSY     BIT(15)
+#define GLOBAL_VTU_OP_FLUSH_ALL                ((0x01 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_LOAD_PURGE   ((0x03 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_GET_NEXT     ((0x04 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_LOAD_PURGE   ((0x05 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_GET_NEXT     ((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
 #define GLOBAL_VTU_VID         0x06
+#define GLOBAL_VTU_VID_MASK    0xfff
+#define GLOBAL_VTU_VID_VALID   BIT(12)
 #define GLOBAL_VTU_DATA_0_3    0x07
 #define GLOBAL_VTU_DATA_4_7    0x08
 #define GLOBAL_VTU_DATA_8_11   0x09
+#define GLOBAL_VTU_STU_DATA_MASK               0x03
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED  0x00
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED    0x01
+#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED      0x02
+#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER  0x03
+#define GLOBAL_STU_DATA_PORT_STATE_DISABLED    0x00
+#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING    0x01
+#define GLOBAL_STU_DATA_PORT_STATE_LEARNING    0x02
+#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING  0x03
 #define GLOBAL_ATU_CONTROL     0x0a
 #define GLOBAL_ATU_CONTROL_LEARN2ALL   BIT(3)
 #define GLOBAL_ATU_OP          0x0b
 #define GLOBAL_ATU_OP_GET_CLR_VIOLATION          ((7 << 12) | GLOBAL_ATU_OP_BUSY)
 #define GLOBAL_ATU_DATA                0x0c
 #define GLOBAL_ATU_DATA_TRUNK                  BIT(15)
+#define GLOBAL_ATU_DATA_TRUNK_ID_MASK          0x00f0
+#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT         4
 #define GLOBAL_ATU_DATA_PORT_VECTOR_MASK       0x3ff0
 #define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT      4
 #define GLOBAL_ATU_DATA_STATE_MASK             0x0f
 #define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP                BIT(3)
 #define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT                0
 #define GLOBAL2_EEPROM_OP      0x14
-#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
-#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
+#define GLOBAL2_EEPROM_OP_BUSY         BIT(15)
+#define GLOBAL2_EEPROM_OP_WRITE                ((3 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_READ         ((4 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_LOAD         BIT(11)
+#define GLOBAL2_EEPROM_OP_WRITE_EN     BIT(10)
+#define GLOBAL2_EEPROM_OP_ADDR_MASK    0xff
 #define GLOBAL2_EEPROM_DATA    0x15
 #define GLOBAL2_PTP_AVB_OP     0x16
 #define GLOBAL2_PTP_AVB_DATA   0x17
 #define GLOBAL2_QOS_WEIGHT     0x1c
 #define GLOBAL2_MISC           0x1d
 
+struct mv88e6xxx_atu_entry {
+       u16     fid;
+       u8      state;
+       bool    trunk;
+       u16     portv_trunkid;
+       u8      mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_stu_entry {
+       /* VTU only */
+       u16     vid;
+       u16     fid;
+
+       /* VTU and STU */
+       u8      sid;
+       bool    valid;
+       u8      data[DSA_MAX_PORTS];
+};
+
 struct mv88e6xxx_priv_state {
        /* When using multi-chip addressing, this mutex protects
         * access to the indirect access registers.  (In single-chip
@@ -347,9 +402,9 @@ struct mv88e6xxx_priv_state {
 
        /* hw bridging */
 
-       u32 fid_mask;
-       u8 fid[DSA_MAX_PORTS];
-       u16 bridge_mask[DSA_MAX_PORTS];
+       DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */
+       u16 fid[DSA_MAX_PORTS];                 /* per (non-bridged) port FID */
+       u16 bridge_mask[DSA_MAX_PORTS];         /* br groups (indexed by FID) */
 
        unsigned long port_state_update_mask;
        u8 port_state[DSA_MAX_PORTS];
@@ -409,12 +464,19 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+                           bool untagged);
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+                          unsigned long *ports, unsigned long *untagged);
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
                           const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
                           const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-                              unsigned char *addr, bool *is_static);
+                              unsigned char *addr, u16 *vid, bool *is_static);
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
                             int reg, int val);
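
The new PORT_CONTROL_2_8021Q_* and GLOBAL_ATU_DATA_TRUNK_ID_* macros above pack small fields into 16-bit switch registers. A hedged standalone illustration of encoding and decoding with the same mask/shift values, register I/O replaced by a local variable:

#include <stdio.h>
#include <stdint.h>

#define PORT_CONTROL_2_8021Q_MASK	(0x03 << 10)
#define PORT_CONTROL_2_8021Q_FALLBACK	(0x01 << 10)

#define GLOBAL_ATU_DATA_TRUNK_ID_MASK	0x00f0
#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT	4

int main(void)
{
	uint16_t reg = 0;
	uint16_t atu_data = 0x8050;	/* trunk bit set, trunk ID 5 */
	unsigned int trunk_id;

	/* Select fallback mode: clear the 2-bit field, then OR in the
	 * value, as mv88e6xxx_setup_port() does with
	 * reg |= PORT_CONTROL_2_8021Q_FALLBACK above.
	 */
	reg &= ~PORT_CONTROL_2_8021Q_MASK;
	reg |= PORT_CONTROL_2_8021Q_FALLBACK;
	printf("802.1q mode bits: 0x%x\n",
	       (reg & PORT_CONTROL_2_8021Q_MASK) >> 10);

	/* Extract a trunk ID from ATU data with the mask/shift pair. */
	trunk_id = (atu_data & GLOBAL_ATU_DATA_TRUNK_ID_MASK) >>
		   GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
	printf("trunk id: %u\n", trunk_id);
	return 0;
}
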
index 49adbf1b7574211dcd97db2c8cc970e427934c87..815eb94990f5edb50883bf9d9a4f68a1dc535e65 100644 (file)
@@ -144,10 +144,9 @@ static void dummy_setup(struct net_device *dev)
        dev->destructor = free_netdev;
 
        /* Fill in device structure with ethernet-generic values. */
-       dev->tx_queue_len = 0;
        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
        dev->features   |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
        eth_hw_addr_random(dev);
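
The dummy change above replaces the old tx_queue_len = 0 trick with the IFF_NO_QUEUE private flag, which tells the core that no qdisc is wanted. A minimal sketch of the same pattern for a hypothetical virtual device; demo_setup is illustrative, not from this tree:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical setup for a queue-less virtual device: IFF_NO_QUEUE
 * tells the core no qdisc is needed, replacing tx_queue_len = 0.
 */
static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE | IFF_LIVE_ADDR_CHANGE;
	dev->flags |= IFF_NOARP;
	eth_hw_addr_random(dev);
}
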
index 2d1ce3c5d0dd34c9fabb1399a32c49be744afdd1..2839af00f20cd1618f6522b49db652499359ff30 100644 (file)
@@ -1726,6 +1726,7 @@ vortex_up(struct net_device *dev)
        if (vp->cb_fn_base)                     /* The PCMCIA people are idiots.  */
                iowrite32(0x8000, vp->cb_fn_base + 4);
        netif_start_queue (dev);
+       netdev_reset_queue(dev);
 err_out:
        return err;
 }
@@ -1763,16 +1764,9 @@ vortex_open(struct net_device *dev)
                        vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
                }
                if (i != RX_RING_SIZE) {
-                       int j;
                        pr_emerg("%s: no memory for rx ring\n", dev->name);
-                       for (j = 0; j < i; j++) {
-                               if (vp->rx_skbuff[j]) {
-                                       dev_kfree_skb(vp->rx_skbuff[j]);
-                                       vp->rx_skbuff[j] = NULL;
-                               }
-                       }
                        retval = -ENOMEM;
-                       goto err_free_irq;
+                       goto err_free_skb;
                }
                /* Wrap the ring. */
                vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1776,13 @@ vortex_open(struct net_device *dev)
        if (!retval)
                goto out;
 
-err_free_irq:
+err_free_skb:
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               if (vp->rx_skbuff[i]) {
+                       dev_kfree_skb(vp->rx_skbuff[i]);
+                       vp->rx_skbuff[i] = NULL;
+               }
+       }
        free_irq(dev->irq, dev);
 err:
        if (vortex_debug > 1)
@@ -1936,16 +1936,18 @@ static void vortex_tx_timeout(struct net_device *dev)
                if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
                        iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
                                 ioaddr + DownListPtr);
-               if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+               if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
                        netif_wake_queue (dev);
+                       netdev_reset_queue(dev);
+               }
                if (vp->drv_flags & IS_BOOMERANG)
                        iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
                iowrite16(DownUnstall, ioaddr + EL3_CMD);
        } else {
                dev->stats.tx_dropped++;
                netif_wake_queue(dev);
+               netdev_reset_queue(dev);
        }
-
        /* Issue Tx Enable */
        iowrite16(TxEnable, ioaddr + EL3_CMD);
        dev->trans_start = jiffies; /* prevent tx timeout */
@@ -2064,6 +2066,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vortex_private *vp = netdev_priv(dev);
        void __iomem *ioaddr = vp->ioaddr;
+       int skblen = skb->len;
 
        /* Put out the doubleword header... */
        iowrite32(skb->len, ioaddr + TX_FIFO);
@@ -2095,6 +2098,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
+       netdev_sent_queue(dev, skblen);
 
        /* Clear the Tx status stack. */
        {
@@ -2126,6 +2130,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        void __iomem *ioaddr = vp->ioaddr;
        /* Calculate the next Tx descriptor entry. */
        int entry = vp->cur_tx % TX_RING_SIZE;
+       int skblen = skb->len;
        struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
        unsigned long flags;
        dma_addr_t dma_addr;
@@ -2231,6 +2236,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        vp->cur_tx++;
+       netdev_sent_queue(dev, skblen);
+
        if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
                netif_stop_queue (dev);
        } else {                                        /* Clear previous interrupt enable. */
@@ -2268,6 +2275,7 @@ vortex_interrupt(int irq, void *dev_id)
        int status;
        int work_done = max_interrupt_work;
        int handled = 0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        ioaddr = vp->ioaddr;
        spin_lock(&vp->lock);
@@ -2315,6 +2323,8 @@ vortex_interrupt(int irq, void *dev_id)
                        if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
                                iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
                                pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+                               pkts_compl++;
+                               bytes_compl += vp->tx_skb->len;
                                dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
                                if (ioread16(ioaddr + TxFree) > 1536) {
                                        /*
@@ -2359,6 +2369,7 @@ vortex_interrupt(int irq, void *dev_id)
                iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
        } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
 
+       netdev_completed_queue(dev, pkts_compl, bytes_compl);
        spin_unlock(&vp->window_lock);
 
        if (vortex_debug > 4)
@@ -2383,6 +2394,7 @@ boomerang_interrupt(int irq, void *dev_id)
        int status;
        int work_done = max_interrupt_work;
        int handled = 0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        ioaddr = vp->ioaddr;
 
@@ -2456,6 +2468,8 @@ boomerang_interrupt(int irq, void *dev_id)
                                        pci_unmap_single(VORTEX_PCI(vp),
                                                le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
 #endif
+                                       pkts_compl++;
+                                       bytes_compl += skb->len;
                                        dev_kfree_skb_irq(skb);
                                        vp->tx_skbuff[entry] = NULL;
                                } else {
@@ -2496,6 +2510,7 @@ boomerang_interrupt(int irq, void *dev_id)
                        iowrite32(0x8000, vp->cb_fn_base + 4);
 
        } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
+       netdev_completed_queue(dev, pkts_compl, bytes_compl);
 
        if (vortex_debug > 4)
                pr_debug("%s: exiting interrupt, status %4.4x.\n",
@@ -2697,7 +2712,8 @@ vortex_down(struct net_device *dev, int final_down)
        struct vortex_private *vp = netdev_priv(dev);
        void __iomem *ioaddr = vp->ioaddr;
 
-       netif_stop_queue (dev);
+       netdev_reset_queue(dev);
+       netif_stop_queue(dev);
 
        del_timer_sync(&vp->rx_oom_timer);
        del_timer_sync(&vp->timer);
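
The 3c59x hunks above wire the driver into byte queue limits (BQL): bytes handed to hardware are reported with netdev_sent_queue(), completions with netdev_completed_queue(), and netdev_reset_queue() clears the accounting whenever the TX path is flushed or restarted. A condensed sketch of that pairing for a hypothetical driver; my_hw_queue_skb() and my_hw_reap() stand in for real ring code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Provided by the real driver's ring code; declared here only so the
 * sketch is self-contained.
 */
static void my_hw_queue_skb(struct net_device *dev, struct sk_buff *skb);
static struct sk_buff *my_hw_reap(struct net_device *dev);

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;	/* sample before the skb may be freed */

	my_hw_queue_skb(dev, skb);
	netdev_sent_queue(dev, len);	/* bytes now owned by hardware */
	return NETDEV_TX_OK;
}

static void demo_tx_complete(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;
	struct sk_buff *skb;

	while ((skb = my_hw_reap(dev)) != NULL) {
		pkts++;
		bytes += skb->len;
		dev_kfree_skb_any(skb);
	}
	netdev_completed_queue(dev, pkts, bytes);	/* once per batch */
}

static void demo_ring_reset(struct net_device *dev)
{
	/* After dropping in-flight descriptors the BQL state must be
	 * cleared too, as the vortex_up()/vortex_down() and timeout
	 * paths above do.
	 */
	netdev_reset_queue(dev);
}
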
index 580553d42d34fd773139cda6076f00d7bc40cce1..88ef67a998b4eed939b00a3c0120648fad02b5d7 100644 (file)
@@ -71,8 +71,6 @@ int sgdma_initialize(struct altera_tse_private *priv)
                      SGDMA_CTRLREG_INTEN |
                      SGDMA_CTRLREG_ILASTD;
 
-       priv->sgdmadesclen = sizeof(struct sgdma_descrip);
-
        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);
 
@@ -254,7 +252,7 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                unsigned int pktstatus = 0;
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
-                                       priv->sgdmadesclen,
+                                       SGDMA_DESC_LEN,
                                        DMA_FROM_DEVICE);
 
                pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
@@ -374,7 +372,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 
                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
-                                          priv->sgdmadesclen,
+                                          SGDMA_DESC_LEN,
                                           DMA_TO_DEVICE);
 
                csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
@@ -402,7 +400,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
        csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
-                                  priv->sgdmadesclen, DMA_TO_DEVICE);
+                                  SGDMA_DESC_LEN, DMA_TO_DEVICE);
 
        csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                priv->tx_dma_csr,
index 85bc33b218d946f557647d1a73f9699e3189827b..bbd52f02330b204aa0838ecd589a4f2a32252759 100644 (file)
@@ -50,6 +50,7 @@ struct sgdma_descrip {
        u8      control;
 } __packed;
 
+#define SGDMA_DESC_LEN sizeof(struct sgdma_descrip)
 
 #define SGDMA_STATUS_ERR               BIT(0)
 #define SGDMA_STATUS_LENGTH_ERR                BIT(1)
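
SGDMA_DESC_LEN derives the descriptor length from sizeof at compile time, replacing the sgdmadesclen field removed above. Because sgdma_descrip is __packed, sizeof matches the exact wire layout; a standalone illustration with a made-up descriptor, not the driver's real field list:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout; only the __packed-vs-padded point matters. */
struct demo_descrip_padded {
	uint32_t raddr;
	uint8_t	 control;	/* compiler pads 3 bytes after this */
	uint32_t next;
};

struct demo_descrip_packed {
	uint32_t raddr;
	uint8_t	 control;
	uint32_t next;
} __attribute__((packed));

#define DEMO_DESC_LEN sizeof(struct demo_descrip_packed)

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct demo_descrip_padded), DEMO_DESC_LEN);
	/* e.g. "padded: 12 bytes, packed: 9 bytes" on common ABIs */
	return 0;
}
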
index 2adb24d4523c915d3b7d87f1294ead36757cea50..103c30ddddf7004c4ea41285c19cfaebc0843817 100644 (file)
@@ -458,7 +458,6 @@ struct altera_tse_private {
        u32 rxctrlreg;
        dma_addr_t rxdescphys;
        dma_addr_t txdescphys;
-       size_t sgdmadesclen;
 
        struct list_head txlisthd;
        struct list_head rxlisthd;
index 299eb4315fe647ba8d67302649a2cf928a4d59d5..4f68d19c45bda1f7a628dcb9f1172d8e81269826 100644 (file)
@@ -905,40 +905,6 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda
        return ret;
 }
 
-static int xgene_get_mac_address(struct device *dev,
-                                unsigned char *addr)
-{
-       int ret;
-
-       ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
-       if (ret)
-               ret = device_property_read_u8_array(dev, "mac-address",
-                                                   addr, 6);
-       if (ret)
-               return -ENODEV;
-
-       return ETH_ALEN;
-}
-
-static int xgene_get_phy_mode(struct device *dev)
-{
-       int i, ret;
-       char *modestr;
-
-       ret = device_property_read_string(dev, "phy-connection-type",
-                                         (const char **)&modestr);
-       if (ret)
-               ret = device_property_read_string(dev, "phy-mode",
-                                                 (const char **)&modestr);
-       if (ret)
-               return -ENODEV;
-
-       for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
-               if (!strcasecmp(modestr, phy_modes(i)))
-                       return i;
-       }
-       return -ENODEV;
-}
 
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
@@ -998,12 +964,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        if (ret)
                return ret;
 
-       if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+       if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
                eth_hw_addr_random(ndev);
 
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
-       pdata->phy_mode = xgene_get_phy_mode(dev);
+       pdata->phy_mode = device_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
index 932bd1862f7adeb7a95cec8ae1efda76edb7a728..2795d6db10e1897e3c3c3b367e28e05ea33858c5 100644 (file)
@@ -874,6 +874,8 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
                atl1c_clean_buffer(pdev, buffer_info);
        }
 
+       netdev_reset_queue(adapter->netdev);
+
        /* Zero out Tx-buffers */
        memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
                ring_count);
@@ -1551,6 +1553,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
        u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        u16 hw_next_to_clean;
        u16 reg;
+       unsigned int total_bytes = 0, total_packets = 0;
 
        reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
 
@@ -1558,12 +1561,18 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
 
        while (next_to_clean != hw_next_to_clean) {
                buffer_info = &tpd_ring->buffer_info[next_to_clean];
+               if (buffer_info->skb) {
+                       total_bytes += buffer_info->skb->len;
+                       total_packets++;
+               }
                atl1c_clean_buffer(pdev, buffer_info);
                if (++next_to_clean == tpd_ring->count)
                        next_to_clean = 0;
                atomic_set(&tpd_ring->next_to_clean, next_to_clean);
        }
 
+       netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+
        if (netif_queue_stopped(adapter->netdev) &&
                        netif_carrier_ok(adapter->netdev)) {
                netif_wake_queue(adapter->netdev);
@@ -2256,6 +2265,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
                dev_kfree_skb_any(skb);
        } else {
+               netdev_sent_queue(adapter->netdev, skb->len);
                atl1c_tx_queue(adapter, skb, tpd, type);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
        }
index 8be9eab733203c2817fe916b38dd6f511e8e91f4..e930aa9a3cfb8ed3c4edad3fc03ae2b3ce6a586f 100644 (file)
@@ -139,6 +139,16 @@ config BNX2X_SRIOV
          Virtualization support in the 578xx and 57712 products. This
          allows for virtual function acceleration in virtual environments.
 
+config BNX2X_VXLAN
+       bool "Virtual eXtensible Local Area Network support"
+       default n
+       depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+       ---help---
+         This enables hardware offload support for the VXLAN protocol over the
+         NetXtremeII series adapters.
+         Say Y here if you want to enable hardware offload support for
+         Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
 config BGMAC
        tristate "BCMA bus GBit core support"
        depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
index 5762c485ea06e75305a88784e0b34816680e164e..ba936635322a83eee32f15e49f12e393ed924d38 100644 (file)
@@ -1392,6 +1392,8 @@ enum sp_rtnl_flag {
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
        BNX2X_SP_RTNL_TX_STOP,
        BNX2X_SP_RTNL_GET_DRV_VERSION,
+       BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+       BNX2X_SP_RTNL_DEL_VXLAN_PORT,
 };
 
 enum bnx2x_iov_flag {
@@ -2571,6 +2573,10 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
                        (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) ||       \
                         IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
 
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
 
 #define SET_FLAG(value, mask, flag) \
        do {\
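
IS_MF_PERCENT_BW selects between two encodings of the same maxCfg value: a percentage of the current link speed, or a count of 100 Mb units. A worked standalone example of both interpretations, mirroring the arithmetic in bnx2x_get_mf_speed() and bnx2x_calc_vn_max() below:

#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 10000;	/* 10 Gb/s link, in Mb/s */
	unsigned int max_cfg = 40;		/* value from the MF config */
	unsigned int percent_rate, sd_rate;

	/* Percent mode (IS_MF_PERCENT_BW true): 40% of link speed */
	percent_rate = line_speed * max_cfg / 100;

	/* SD mode: 40 units of 100 Mb/s */
	sd_rate = max_cfg * 100;

	printf("percent mode: %u Mb/s, SD mode: %u Mb/s\n",
	       percent_rate, sd_rate);
	/* Both print 4000 at exactly 10G; the two encodings diverge as
	 * soon as the physical link speed changes.
	 */
	return 0;
}
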
index 1637de6caf46b5213e3148d5d3e44309e1f1483d..44173be5cbf0d914111304ba2420954f44d5a7e7 100644 (file)
@@ -264,9 +264,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        if (likely(skb)) {
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
+               dev_kfree_skb_any(skb);
        }
 
-       dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;
 
@@ -1190,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
                /* Calculate the current MAX line speed limit for the MF
                 * devices
                 */
-               if (IS_MF_SI(bp))
+               if (IS_MF_PERCENT_BW(bp))
                        line_speed = (line_speed * maxCfg) / 100;
                else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;
index fa7c532012654eb05ad42fce20e92c6ab03cef32..b7d32e8412f14b8f9c3a71d4ff29d750a0d730d7 100644 (file)
@@ -967,6 +967,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;
 
+       start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
        start_params->inner_rss = 1;
 
        if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
@@ -1386,4 +1388,16 @@ void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
  * @state:     OS_DRIVER_STATE_* value reflecting current driver state
  */
 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp:                driver handle
+ * @offset:    byte offset in nvram
+ * @ret_buf:   pointer to buffer where data is to be stored
+ * @buf_size:   length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+                    int buf_size);
+
 #endif /* BNX2X_CMN_H */
index 6b2050a198df8ebd43fb29ec4176424491f29ac8..aeb7ce64452e14cd3cbe49325f63bae2d99e3ef2 100644 (file)
@@ -1348,8 +1348,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
        return rc;
 }
 
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
-                           int buf_size)
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+                    int buf_size)
 {
        int rc;
        u32 cmd_flags;
@@ -1723,6 +1723,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
                offset += sizeof(u32);
                data_buf += sizeof(u32);
                written_so_far += sizeof(u32);
+
+               /* At end of each 4Kb page, release nvram lock to allow MFW
+                * chance to take it for its own use.
+                */
+               if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+                   (written_so_far < buf_size)) {
+                       DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+                          "Releasing NVM lock after offset 0x%x\n",
+                          (u32)(offset - sizeof(u32)));
+                       bnx2x_release_nvram_lock(bp);
+                       usleep_range(1000, 2000);
+                       rc = bnx2x_acquire_nvram_lock(bp);
+                       if (rc)
+                               return rc;
+               }
+
                cmd_flags = 0;
        }
 
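
The hunk above releases the NVRAM lock at every 4 KB page boundary so the management firmware can take it mid-write. A schematic of that release-sleep-reacquire loop, with the lock and write primitives stubbed out rather than the driver's real helpers:

#include <stdio.h>
#include <unistd.h>

#define PAGE 4096

/* Stubs standing in for bnx2x_{acquire,release}_nvram_lock() and the
 * actual dword write.
 */
static int acquire_lock(void) { return 0; }
static void release_lock(void) { }
static void write_dword(unsigned int off) { (void)off; }

static int chunked_write(unsigned int offset, unsigned int len)
{
	unsigned int written = 0;

	if (acquire_lock())
		return -1;
	while (written < len) {
		write_dword(offset + written);
		written += 4;
		/* End of a 4 KB page with more to go: let others in. */
		if (!((offset + written) % PAGE) && written < len) {
			release_lock();
			usleep(1000);
			if (acquire_lock())
				return -1;
		}
	}
	release_lock();
	return 0;
}

int main(void)
{
	return chunked_write(0, 3 * PAGE);
}
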
index 08a08fa49caad3fb8850b0f92f4285c43a667835..cafd5de675cf3836bd9f1a9776c5dfc4d6d5bdbd 100644 (file)
@@ -2075,6 +2075,25 @@ enum curr_cfg_method_e {
        CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. Option ROM, NPAR, O/S Cfg Utils */
 };
 
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+       u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+       u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+       /* hdr used internally by the MFW */
+       u32 hdr;
+       u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+       struct bdn_fc_npiv_cfg fc_npiv_cfg;
+       struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
 struct mdump_driver_info {
        u32 epoc;
        u32 drv_ver;
index 31c63aa2252166a4a9fb5d811735d764e8d1d082..26fbfcc6f7db8d17d9d9ee32cb6171cc62d8516a 100644 (file)
@@ -2494,7 +2494,7 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
        else {
                u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-               if (IS_MF_SI(bp)) {
+               if (IS_MF_PERCENT_BW(bp)) {
                        /* maxCfg in percents of linkspeed */
                        vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
                } else /* SD modes */
@@ -10075,6 +10075,81 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
        }
 }
 
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+       struct bnx2x_func_switch_update_params *switch_update_params;
+       struct bnx2x_func_state_params func_params = {NULL};
+       int rc;
+
+       switch_update_params = &func_params.params.switch_update;
+
+       /* Prepare parameters for function state transitions */
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+       func_params.f_obj = &bp->func_obj;
+       func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+       /* Function parameters */
+       __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+                 &switch_update_params->changes);
+       switch_update_params->vxlan_dst_port = port;
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc)
+               BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+                         port, rc);
+       return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+       if (!netif_running(bp->dev))
+               return;
+
+       if (bp->vxlan_dst_port || !IS_PF(bp)) {
+               DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+               return;
+       }
+
+       bp->vxlan_dst_port = port;
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct bnx2x *bp = netdev_priv(netdev);
+       u16 t_port = ntohs(port);
+
+       __bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+       if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+               DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+               return;
+       }
+
+       if (netif_running(bp->dev)) {
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+       } else {
+               bp->vxlan_dst_port = 0;
+               netdev_info(bp->dev, "Deleted vxlan dest port %d\n", port);
+       }
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct bnx2x *bp = netdev_priv(netdev);
+       u16 t_port = ntohs(port);
+
+       __bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
 static int bnx2x_close(struct net_device *dev);
 
 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10083,6 +10158,9 @@ static int bnx2x_close(struct net_device *dev);
 static void bnx2x_sp_rtnl_task(struct work_struct *work)
 {
        struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+       u16 port;
+#endif
 
        rtnl_lock();
 
@@ -10181,6 +10259,27 @@ sp_rtnl_not_reset:
                               &bp->sp_rtnl_state))
                bnx2x_update_mng_version(bp);
 
+#ifdef CONFIG_BNX2X_VXLAN
+       port = bp->vxlan_dst_port;
+       if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+                              &bp->sp_rtnl_state)) {
+               if (!bnx2x_vxlan_port_update(bp, port))
+                       netdev_info(bp->dev, "Added vxlan dest port %d\n", port);
+               else
+                       bp->vxlan_dst_port = 0;
+       }
+
+       if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+                              &bp->sp_rtnl_state)) {
+               if (!bnx2x_vxlan_port_update(bp, 0)) {
+                       netdev_info(bp->dev,
+                                   "Deleted vxlan dest port %d\n", port);
+                       bp->vxlan_dst_port = 0;
+                       vxlan_get_rx_port(bp->dev);
+               }
+       }
+#endif
+
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -12379,6 +12478,12 @@ static int bnx2x_open(struct net_device *dev)
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
        if (rc)
                return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+       if (IS_PF(bp))
+               vxlan_get_rx_port(dev);
+#endif
+
        return 0;
 }
 
@@ -12894,6 +12999,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
        .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
        .ndo_features_check     = bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+       .ndo_add_vxlan_port     = bnx2x_add_vxlan_port,
+       .ndo_del_vxlan_port     = bnx2x_del_vxlan_port,
+#endif
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -14653,6 +14762,90 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                rc = -EINVAL;
        }
 
+       /* For storage-only interfaces, change driver state */
+       if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+               switch (ctl->drv_state) {
+               case DRV_NOP:
+                       break;
+               case DRV_ACTIVE:
+                       bnx2x_set_os_driver_state(bp,
+                                                 OS_DRIVER_STATE_ACTIVE);
+                       break;
+               case DRV_INACTIVE:
+                       bnx2x_set_os_driver_state(bp,
+                                                 OS_DRIVER_STATE_DISABLED);
+                       break;
+               case DRV_UNLOADED:
+                       bnx2x_set_os_driver_state(bp,
+                                                 OS_DRIVER_STATE_NOT_LOADED);
+                       break;
+               default:
+                       BNX2X_ERR("Unknown cnic driver state: %d\n",
+                                 ctl->drv_state);
+               }
+       }
+
+       return rc;
+}
+
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+                            struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bdn_fc_npiv_tbl *tbl = NULL;
+       u32 offset, entries;
+       int rc = -EINVAL;
+       int i;
+
+       if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+               goto out;
+
+       DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+       tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+       if (!tbl) {
+               BNX2X_ERR("Failed to allocate fc_npiv table\n");
+               goto out;
+       }
+
+       offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+       DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+       /* Read the table contents from nvram */
+       if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+               BNX2X_ERR("Failed to read FC-NPIV table\n");
+               goto out;
+       }
+
+       /* Since bnx2x_nvram_read() returns data in be32, we need to convert
+        * the number of entries back to cpu endianness.
+        */
+       entries = tbl->fc_npiv_cfg.num_of_npiv;
+       entries = (__force u32)be32_to_cpu((__force __be32)entries);
+       tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+       if (!tbl->fc_npiv_cfg.num_of_npiv) {
+               DP(BNX2X_MSG_MCP,
+                  "No FC-NPIV table [valid, simply not present]\n");
+               goto out;
+       } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+               BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+                         tbl->fc_npiv_cfg.num_of_npiv);
+               goto out;
+       } else {
+               DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+                  tbl->fc_npiv_cfg.num_of_npiv);
+       }
+
+       /* Copy the data into cnic-provided struct */
+       cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+       for (i = 0; i < cnic_tbl->count; i++) {
+               memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+               memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+       }
+
+       rc = 0;
+out:
+       kfree(tbl);
        return rc;
 }
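
bnx2x_nvram_read() returns the buffer as big-endian 32-bit words, so num_of_npiv above must be byte-swapped before use; the __force casts merely silence sparse. A userspace equivalent of that conversion using the htonl()/ntohl() analogues of cpu_to_be32()/be32_to_cpu():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* As if read straight from NVRAM: the value 2 stored big-endian. */
	uint32_t raw = htonl(2);

	/* be32_to_cpu() equivalent; a no-op on big-endian hosts. */
	uint32_t entries = ntohl(raw);

	printf("raw 0x%08x -> %u entries\n", raw, entries);
	return 0;
}
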
 
@@ -14798,6 +14991,7 @@ static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
+       cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
index 17c145fdf3ff6f40e2ef25098e1c0b423dcf9c5f..b69dc58faeab5b74ffc3142341efea084d83706d 100644 (file)
@@ -192,6 +192,7 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
        struct drv_ctl_info info;
        struct drv_ctl_io *io = &info.data.io;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        info.cmd = DRV_CTL_CTX_WR_CMD;
        io->cid_addr = cid_addr;
        io->offset = off;
@@ -206,6 +207,7 @@ static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
        struct drv_ctl_info info;
        struct drv_ctl_io *io = &info.data.io;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        info.cmd = DRV_CTL_CTXTBL_WR_CMD;
        io->offset = off;
        io->dma_addr = addr;
@@ -219,6 +221,7 @@ static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
        struct drv_ctl_info info;
        struct drv_ctl_l2_ring *ring = &info.data.ring;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        if (start)
                info.cmd = DRV_CTL_START_L2_CMD;
        else
@@ -236,6 +239,7 @@ static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
        struct drv_ctl_info info;
        struct drv_ctl_io *io = &info.data.io;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        info.cmd = DRV_CTL_IO_WR_CMD;
        io->offset = off;
        io->data = val;
@@ -249,13 +253,14 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
        struct drv_ctl_info info;
        struct drv_ctl_io *io = &info.data.io;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        info.cmd = DRV_CTL_IO_RD_CMD;
        io->offset = off;
        ethdev->drv_ctl(dev->netdev, &info);
        return io->data;
 }
 
-static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
 {
        struct cnic_local *cp = dev->cnic_priv;
        struct cnic_eth_dev *ethdev = cp->ethdev;
@@ -263,6 +268,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
        struct fcoe_capabilities *fcoe_cap =
                &info.data.register_data.fcoe_features;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        if (reg) {
                info.cmd = DRV_CTL_ULP_REGISTER_CMD;
                if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
@@ -272,6 +278,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
        }
 
        info.data.ulp_type = ulp_type;
+       info.drv_state = state;
        ethdev->drv_ctl(dev->netdev, &info);
 }
 
@@ -286,6 +293,7 @@ static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
        struct cnic_eth_dev *ethdev = cp->ethdev;
        struct drv_ctl_info info;
 
+       memset(&info, 0, sizeof(struct drv_ctl_info));
        info.cmd = cmd;
        info.data.credit.credit_count = count;
        ethdev->drv_ctl(dev->netdev, &info);
@@ -591,7 +599,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 
        mutex_unlock(&cnic_lock);
 
-       cnic_ulp_ctl(dev, ulp_type, true);
+       cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
 
        return 0;
 
@@ -636,7 +644,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
        if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
                netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
-       cnic_ulp_ctl(dev, ulp_type, false);
+       if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+               cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+       else
+               cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
 
        return 0;
 }
@@ -4267,6 +4278,7 @@ static void cnic_delete_task(struct work_struct *work)
 
                cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
 
+               memset(&info, 0, sizeof(struct drv_ctl_info));
                info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
                cp->ethdev->drv_ctl(dev->netdev, &info);
        }
@@ -5433,6 +5445,23 @@ static void cnic_free_dev(struct cnic_dev *dev)
        kfree(dev);
 }
 
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+                               struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct bnx2x *bp = netdev_priv(dev->netdev);
+       int ret;
+
+       if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+               return -EAGAIN;     /* bnx2x is down */
+
+       if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+               return -EINVAL;
+
+       ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+       return ret;
+}
+
 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
                                       struct pci_dev *pdev)
 {
@@ -5451,6 +5480,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
        cdev->register_device = cnic_register_device;
        cdev->unregister_device = cnic_unregister_device;
        cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+       cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
 
        cp = cdev->cnic_priv;
        cp->dev = cdev;
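
The memset() additions above matter because drv_ctl_info grows a drv_state field (see the cnic_if.h hunk below): on-stack structs that only assigned .cmd would otherwise pass stack garbage as the state. A standalone demonstration that a partial designated initializer also zeroes the remaining members, which is the usual alternative to an explicit memset():

#include <stdio.h>
#include <string.h>

struct drv_ctl_info_demo {
	int cmd;
	int drv_state;	/* newly added field */
};

int main(void)
{
	/* Partial designated initializer: unnamed members become 0. */
	struct drv_ctl_info_demo a = { .cmd = 7 };

	/* memset style, as the cnic hunks above do. */
	struct drv_ctl_info_demo b;

	memset(&b, 0, sizeof(b));
	b.cmd = 7;

	printf("a.drv_state=%d b.drv_state=%d\n", a.drv_state, b.drv_state);
	return 0;	/* both print 0; an uninitialized local would not */
}
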
index ef6125b0ee3ed35fbd96dc0a2c5647fa0894e3b0..789e5c7e93116ab29d944e57296d511789bfedb0 100644 (file)
@@ -15,8 +15,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION    "2.5.21"
-#define CNIC_MODULE_RELDATE    "January 29, 2015"
+#define CNIC_MODULE_VERSION    "2.5.22"
+#define CNIC_MODULE_RELDATE    "July 20, 2015"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
@@ -151,6 +151,11 @@ struct drv_ctl_register_data {
 
 struct drv_ctl_info {
        int     cmd;
+       int     drv_state;
+#define DRV_NOP                0
+#define DRV_ACTIVE     1
+#define DRV_INACTIVE   2
+#define DRV_UNLOADED   3
        union {
                struct drv_ctl_spq_credit credit;
                struct drv_ctl_io io;
@@ -161,6 +166,15 @@ struct drv_ctl_info {
        } data;
 };
 
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+       u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+       u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+       u32 count;
+};
+
 struct cnic_ops {
        struct module   *cnic_owner;
        /* Calls to these functions are protected by RCU.  When
@@ -226,6 +240,8 @@ struct cnic_eth_dev {
        int             (*drv_submit_kwqes_16)(struct net_device *,
                                               struct kwqe_16 *[], u32);
        int             (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+       int             (*drv_get_fc_npiv_tbl)(struct net_device *,
+                                              struct cnic_fc_npiv_tbl *);
        unsigned long   reserved1[2];
        union drv_info_to_mcp   *addr_drv_info_to_mcp;
 };
@@ -314,6 +330,7 @@ struct cnic_dev {
        struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
        int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
                                 char *data, u16 data_size);
+       int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
        unsigned long   flags;
 #define CNIC_F_CNIC_UP         1
 #define CNIC_F_BNX2_CLASS      3
index 0612b19f6313bd3e6ffa205be2b543585262e31e..506047c386071db9472d60218faa004fe94849a8 100644 (file)
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                        if (!next_cmpl->valid)
                                break;
                }
+               packets++;
 
                /* TODO: BNA_CQ_EF_LOCAL ? */
                if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                else
                        bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-               packets++;
                rcb->rxq->rx_packets++;
                rcb->rxq->rx_bytes += totlen;
                ccb->bytes_per_intr += totlen;
index 3584420878782aa72800072e28997d40280dccec..9b35d142f47accfbaec039f0d8100fb45d68bbf8 100644 (file)
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
 config THUNDER_NIC_PF
        tristate "Thunder Physical function driver"
        depends on 64BIT
-       default ARCH_THUNDER
        select THUNDER_NIC_BGX
        ---help---
          This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
        tristate "Thunder Virtual function driver"
        depends on 64BIT
-       default ARCH_THUNDER
        ---help---
          This driver supports Thunder's NIC virtual function
 
 config THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
        depends on 64BIT
-       default ARCH_THUNDER
        select PHYLIB
        select MDIO_OCTEON
        ---help---
index b961a89dc6264555553ee94484478b5dca9822dc..5e541862f65e52a14be8d6e1de95b3eca5efaf2f 100644 (file)
@@ -6,6 +6,7 @@
  * as published by the Free Software Foundation.
  */
 
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -26,7 +27,7 @@
 struct lmac {
        struct bgx              *bgx;
        int                     dmac;
-       unsigned char           mac[ETH_ALEN];
+       u8                      mac[ETH_ALEN];
        bool                    link_up;
        int                     lmacid; /* ID within BGX */
        int                     lmacid_bd; /* ID on board */
@@ -835,18 +836,108 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
        }
 }
 
-static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
+{
+       u8 mac[ETH_ALEN];
+       int ret;
+
+       ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+                                           "mac-address", mac, ETH_ALEN);
+       if (ret)
+               goto out;
+
+       if (!is_valid_ether_addr(mac)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       memcpy(dst, mac, ETH_ALEN);
+out:
+       return ret;
+}
+
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+                                        u32 lvl, void *context, void **rv)
+{
+       struct bgx *bgx = context;
+       struct acpi_device *adev;
+
+       if (acpi_bus_get_device(handle, &adev))
+               goto out;
+
+       acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+
+       SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+
+       bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+out:
+       bgx->lmac_count++;
+       return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+                                    void *context, void **ret_val)
+{
+       struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct bgx *bgx = context;
+       char bgx_sel[5];
+
+       snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+       if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+               pr_warn("Invalid link device\n");
+               return AE_OK;
+       }
+
+       if (strncmp(string.pointer, bgx_sel, 4))
+               return AE_OK;
+
+       acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+                           bgx_acpi_register_phy, NULL, bgx, NULL);
+
+       kfree(string.pointer);
+       return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
 {
+       acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+       return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+       return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+       struct device_node *np;
        struct device_node *np_child;
        u8 lmac = 0;
+       char bgx_sel[5];
+       const char *mac;
 
-       for_each_child_of_node(np, np_child) {
-               struct device_node *phy_np;
-               const char *mac;
+       /* Get BGX node from DT */
+       snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+       np = of_find_node_by_name(NULL, bgx_sel);
+       if (!np)
+               return -ENODEV;
 
-               phy_np = of_parse_phandle(np_child, "phy-handle", 0);
-               if (phy_np)
-                       bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+       for_each_child_of_node(np, np_child) {
+               struct device_node *phy_np = of_parse_phandle(np_child,
+                                                             "phy-handle", 0);
+               if (!phy_np)
+                       continue;
+               bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
 
                mac = of_get_mac_address(np_child);
                if (mac)
@@ -858,6 +949,24 @@ static void bgx_init_of(struct bgx *bgx, struct device_node *np)
                if (lmac == MAX_LMAC_PER_BGX)
                        break;
        }
+       return 0;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+       return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+       if (!acpi_disabled)
+               return bgx_init_acpi_phy(bgx);
+
+       return bgx_init_of_phy(bgx);
 }
 
 static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -865,8 +974,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int err;
        struct device *dev = &pdev->dev;
        struct bgx *bgx = NULL;
-       struct device_node *np;
-       char bgx_sel[5];
        u8 lmac;
 
        bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
@@ -902,10 +1009,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        bgx_vnic[bgx->bgx_id] = bgx;
        bgx_get_qlm_mode(bgx);
 
-       snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
-       np = of_find_node_by_name(NULL, bgx_sel);
-       if (np)
-               bgx_init_of(bgx, np);
+       err = bgx_init_phy(bgx);
+       if (err)
+               goto err_enable;
 
        bgx_init_hw(bgx);
 
index 58de4443eac0318234fd6b6ef46384243f14d7da..3c99454aac0af1f844d797e2683fcddd828c2250 100644 (file)
@@ -768,6 +768,10 @@ struct adapter {
 
        struct dentry *debugfs_root;
        u32 use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
+       u32 trace_rss;  /* 1 implies that a different RSS flit is used per
+                        * filter; if 0, the default RSS flit is used for
+                        * all 4 filters.
+                        */
 
        spinlock_t stats_lock;
        spinlock_t win0_lock ____cacheline_aligned_in_smp;
@@ -1441,6 +1445,10 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+                       int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+                        int filter_index, int *enabled);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
index b6577349cf4e3beeec71ffa6440b0911b5ee0aa3..0a87a3247464fdd1939d8bcb5867a9f0e735cf02 100644 (file)
@@ -346,11 +346,11 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
                if (is_t4(adap->params.chip)) {
                        i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
                                        ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
-                               wr = obq_wr_t4;
+                       wr = obq_wr_t4;
                } else {
                        i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
                                        ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
-                               wr = obq_wr_t5;
+                       wr = obq_wr_t5;
                }
        }
        if (i)
@@ -1201,6 +1201,299 @@ static const struct file_operations mbox_debugfs_fops = {
        .write   = mbox_write
 };
 
+static int mps_trc_show(struct seq_file *seq, void *v)
+{
+       int enabled, i;
+       struct trace_params tp;
+       unsigned int trcidx = (uintptr_t)seq->private & 3;
+       struct adapter *adap = seq->private - trcidx;
+
+       t4_get_trace_filter(adap, &tp, trcidx, &enabled);
+       if (!enabled) {
+               seq_puts(seq, "tracer is disabled\n");
+               return 0;
+       }
+
+       if (tp.skip_ofst * 8 >= TRACE_LEN) {
+               dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
+               return -EINVAL;
+       }
+       if (tp.port < 8) {
+               i = adap->chan_map[tp.port & 3];
+               if (i >= MAX_NPORTS) {
+                       dev_err(adap->pdev_dev,
+                               "tracer %u is assigned to non-existing port\n",
+                               trcidx);
+                       return -EINVAL;
+               }
+               seq_printf(seq, "tracer is capturing %s %s, ",
+                          adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
+       } else {
+               seq_printf(seq, "tracer is capturing loopback %d, ",
+                          tp.port - 8);
+       }
+       seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
+                  tp.min_len);
+       seq_printf(seq, "packets captured %smatch filter\n",
+                  tp.invert ? "do not " : "");
+
+       if (tp.skip_ofst) {
+               seq_puts(seq, "filter pattern: ");
+               for (i = 0; i < tp.skip_ofst * 2; i += 2)
+                       seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+               seq_putc(seq, '/');
+               for (i = 0; i < tp.skip_ofst * 2; i += 2)
+                       seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+               seq_puts(seq, "@0\n");
+       }
+
+       seq_puts(seq, "filter pattern: ");
+       for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+               seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+       seq_putc(seq, '/');
+       for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+               seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+       seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
+       return 0;
+}
+
+static int mps_trc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mps_trc_show, inode->i_private);
+}
+
+static unsigned int xdigit2int(unsigned char c)
+{
+       return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+#define TRC_PORT_NONE 0xff
+#define TRC_RSS_ENABLE 0x33
+#define TRC_RSS_DISABLE 0x13
+
+/* Set an MPS trace filter.  Syntax is:
+ *
+ * disable
+ *
+ * to disable tracing, or
+ *
+ * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
+ *
+ * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one
+ * of the NIC's response qid obtained from sge_qinfo and pattern has the form
+ *
+ * <pattern data>[/<pattern mask>][@<anchor>]
+ *
+ * Up to 2 filter patterns can be specified.  If 2 are supplied the first one
+ * must be anchored at 0.  An omitted mask is taken as a mask of 1s, an omitted
+ * anchor is taken as 0.
+ */
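
mps_trc_write() below parses this grammar word by word with strsep(). A small userspace sketch of the same tokenizing style for the key=value words; the real function additionally handles patterns, masks and anchors:

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[] = "rx0 qid=12 snaplen=128 minlen=64 not";
	char *p = line, *word;
	unsigned int qid = 0, snaplen = 0, minlen = 0;
	int invert = 0, port = -1;

	while (p) {
		while (isspace((unsigned char)*p))
			p++;
		word = strsep(&p, " ");
		if (!*word)
			break;
		if (!strncmp(word, "qid=", 4))
			qid = strtoul(word + 4, NULL, 10);
		else if (!strncmp(word, "snaplen=", 8))
			snaplen = strtoul(word + 8, NULL, 10);
		else if (!strncmp(word, "minlen=", 7))
			minlen = strtoul(word + 7, NULL, 10);
		else if (!strcmp(word, "not"))
			invert = !invert;
		else if (!strncmp(word, "rx", 2))
			port = word[2] - '0';	/* rx0..rx3 */
	}
	printf("port=%d qid=%u snaplen=%u minlen=%u invert=%d\n",
	       port, qid, snaplen, minlen, invert);
	return 0;
}
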
+static ssize_t mps_trc_write(struct file *file, const char __user *buf,
+                            size_t count, loff_t *pos)
+{
+       int i, enable, ret;
+       u32 *data, *mask;
+       struct trace_params tp;
+       const struct inode *ino;
+       unsigned int trcidx;
+       char *s, *p, *word, *end;
+       struct adapter *adap;
+       u32 j;
+
+       ino = file_inode(file);
+       trcidx = (uintptr_t)ino->i_private & 3;
+       adap = ino->i_private - trcidx;
+
+       /* Don't accept more than 1K of input: nothing that long can be
+        * valid except lots of whitespace, so use less.
+        */
+       if (count > 1024)
+               return -EFBIG;
+       p = s = kzalloc(count + 1, GFP_USER);
+       if (!s)
+               return -ENOMEM;
+       if (copy_from_user(s, buf, count)) {
+               count = -EFAULT;
+               goto out;
+       }
+
+       if (s[count - 1] == '\n')
+               s[count - 1] = '\0';
+
+       enable = strcmp("disable", s) != 0;
+       if (!enable)
+               goto apply;
+
+       /* Enable or disable the multi-RSS trace filter mode */
+       if (adap->trace_rss)
+               t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
+       else
+               t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
+
+       memset(&tp, 0, sizeof(tp));
+       tp.port = TRC_PORT_NONE;
+       i = 0;  /* counts pattern nibbles */
+
+       while (p) {
+               while (isspace(*p))
+                       p++;
+               word = strsep(&p, " ");
+               if (!*word)
+                       break;
+
+               if (!strncmp(word, "qid=", 4)) {
+                       end = (char *)word + 4;
+                       ret = kstrtouint(end, 10, &j);
+                       if (ret)
+                               goto out;
+                       if (!adap->trace_rss) {
+                               t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
+                               continue;
+                       }
+
+                       switch (trcidx) {
+                       case 0:
+                               t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
+                               break;
+                       case 1:
+                               t4_write_reg(adap,
+                                            MPS_TRC_FILTER1_RSS_CONTROL_A, j);
+                               break;
+                       case 2:
+                               t4_write_reg(adap,
+                                            MPS_TRC_FILTER2_RSS_CONTROL_A, j);
+                               break;
+                       case 3:
+                               t4_write_reg(adap,
+                                            MPS_TRC_FILTER3_RSS_CONTROL_A, j);
+                               break;
+                       }
+                       continue;
+               }
+               if (!strncmp(word, "snaplen=", 8)) {
+                       end = (char *)word + 8;
+                       ret = kstrtouint(end, 10, &j);
+                       if (ret || j > 9600) {
+inval:                         count = -EINVAL;
+                               goto out;
+                       }
+                       tp.snap_len = j;
+                       continue;
+               }
+               if (!strncmp(word, "minlen=", 7)) {
+                       end = (char *)word + 7;
+                       ret = kstrtouint(end, 10, &j);
+                       if (ret || j > TFMINPKTSIZE_M)
+                               goto inval;
+                       tp.min_len = j;
+                       continue;
+               }
+               if (!strcmp(word, "not")) {
+                       tp.invert = !tp.invert;
+                       continue;
+               }
+               if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
+                       if (word[8] < '0' || word[8] > '3' || word[9])
+                               goto inval;
+                       tp.port = word[8] - '0' + 8;
+                       continue;
+               }
+               if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
+                       if (word[2] < '0' || word[2] > '3' || word[3])
+                               goto inval;
+                       tp.port = word[2] - '0' + 4;
+                       if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
+                               goto inval;
+                       continue;
+               }
+               if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
+                       if (word[2] < '0' || word[2] > '3' || word[3])
+                               goto inval;
+                       tp.port = word[2] - '0';
+                       if (adap->chan_map[tp.port] >= MAX_NPORTS)
+                               goto inval;
+                       continue;
+               }
+               if (!isxdigit(*word))
+                       goto inval;
+
+               /* we have found a trace pattern */
+               if (i) {                            /* split pattern */
+                       if (tp.skip_len)            /* too many splits */
+                               goto inval;
+                       tp.skip_ofst = i / 16;
+               }
+
+               data = &tp.data[i / 8];
+               mask = &tp.mask[i / 8];
+               j = i;
+
+               while (isxdigit(*word)) {
+                       if (i >= TRACE_LEN * 2) {
+                               count = -EFBIG;
+                               goto out;
+                       }
+                       *data = (*data << 4) + xdigit2int(*word++);
+                       if (++i % 8 == 0)
+                               data++;
+               }
+               if (*word == '/') {
+                       word++;
+                       while (isxdigit(*word)) {
+                               if (j >= i)         /* mask longer than data */
+                                       goto inval;
+                               *mask = (*mask << 4) + xdigit2int(*word++);
+                               if (++j % 8 == 0)
+                                       mask++;
+                       }
+                       if (i != j)                 /* mask shorter than data */
+                               goto inval;
+               } else {                            /* no mask, use all 1s */
+                       for ( ; i - j >= 8; j += 8)
+                               *mask++ = 0xffffffff;
+                       if (i % 8)
+                               *mask = (1 << (i % 8) * 4) - 1;
+               }
+               if (*word == '@') {
+                       end = (char *)word + 1;
+                       ret = kstrtouint(end, 10, &j);
+                       if (ret)            /* anchor is not a valid number */
+                               goto inval;
+                       if (j & 7)          /* doesn't start at multiple of 8 */
+                               goto inval;
+                       j /= 8;
+                       if (j < tp.skip_ofst)     /* overlaps earlier pattern */
+                               goto inval;
+                       if (j - tp.skip_ofst > 31)            /* skip too big */
+                               goto inval;
+                       tp.skip_len = j - tp.skip_ofst;
+               }
+               if (i % 8) {
+                       *data <<= (8 - i % 8) * 4;
+                       *mask <<= (8 - i % 8) * 4;
+                       i = (i + 15) & ~15;         /* 8-byte align */
+               }
+       }
+
+       if (tp.port == TRC_PORT_NONE)
+               goto inval;
+
+apply:
+       i = t4_set_trace_filter(adap, &tp, trcidx, enable);
+       if (i)
+               count = i;
+out:
+       kfree(s);
+       return count;
+}
+
+static const struct file_operations mps_trc_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mps_trc_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+       .write   = mps_trc_write
+};
+
 static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
                          loff_t *ppos)
 {
@@ -1943,13 +2236,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
 {
        struct adapter *adap = seq->private;
        int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-       int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+       int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
        int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
        int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
        int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
        int i, r = (uintptr_t)v - 1;
-       int toe_idx = r - eth_entries;
-       int rdma_idx = toe_idx - toe_entries;
+       int iscsi_idx = r - eth_entries;
+       int rdma_idx = iscsi_idx - iscsi_entries;
        int ciq_idx = rdma_idx - rdma_entries;
        int ctrl_idx =  ciq_idx - ciq_entries;
        int fq_idx =  ctrl_idx - ctrl_entries;
@@ -1965,8 +2258,12 @@ do { \
                seq_putc(seq, '\n'); \
 } while (0)
 #define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
 #define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
 #define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
 
        if (r < eth_entries) {
                int base_qset = r * 4;
@@ -2005,12 +2302,30 @@ do { \
                R("FL avail:", fl.avail);
                R("FL PIDX:", fl.pidx);
                R("FL CIDX:", fl.cidx);
-       } else if (toe_idx < toe_entries) {
-               const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
-               const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
-               int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+               RL("RxPackets:", stats.pkts);
+               RL("RxCSO:", stats.rx_cso);
+               RL("VLANxtract:", stats.vlan_ex);
+               RL("LROmerged:", stats.lro_merged);
+               RL("LROpackets:", stats.lro_pkts);
+               RL("RxDrops:", stats.rx_drops);
+               TL("TSO:", tso);
+               TL("TxCSO:", tx_cso);
+               TL("VLANins:", vlan_ins);
+               TL("TxQFull:", q.stops);
+               TL("TxQRestarts:", q.restarts);
+               TL("TxMapErr:", mapping_err);
+               RL("FLAllocErr:", fl.alloc_failed);
+               RL("FLLrgAlcErr:", fl.large_alloc_failed);
+               RL("FLStarving:", fl.starving);
+
+       } else if (iscsi_idx < iscsi_entries) {
+               const struct sge_ofld_rxq *rx =
+                       &adap->sge.ofldrxq[iscsi_idx * 4];
+               const struct sge_ofld_txq *tx =
+                       &adap->sge.ofldtxq[iscsi_idx * 4];
+               int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
 
-               S("QType:", "TOE");
+               S("QType:", "iSCSI");
                T("TxQ ID:", q.cntxt_id);
                T("TxQ size:", q.size);
                T("TxQ inuse:", q.in_use);
@@ -2030,6 +2345,13 @@ do { \
                R("FL avail:", fl.avail);
                R("FL PIDX:", fl.pidx);
                R("FL CIDX:", fl.cidx);
+               RL("RxPackets:", stats.pkts);
+               RL("RxImmPkts:", stats.imm);
+               RL("RxNoMem:", stats.nomem);
+               RL("FLAllocErr:", fl.alloc_failed);
+               RL("FLLrgAlcErr:", fl.large_alloc_failed);
+               RL("FLStarving:", fl.starving);
+
        } else if (rdma_idx < rdma_entries) {
                const struct sge_ofld_rxq *rx =
                                &adap->sge.rdmarxq[rdma_idx * 4];
@@ -2052,6 +2374,13 @@ do { \
                R("FL avail:", fl.avail);
                R("FL PIDX:", fl.pidx);
                R("FL CIDX:", fl.cidx);
+               RL("RxPackets:", stats.pkts);
+               RL("RxImmPkts:", stats.imm);
+               RL("RxNoMem:", stats.nomem);
+               RL("FLAllocErr:", fl.alloc_failed);
+               RL("FLLrgAlcErr:", fl.large_alloc_failed);
+               RL("FLStarving:", fl.starving);
+
        } else if (ciq_idx < ciq_entries) {
                const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
                int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2067,6 +2396,9 @@ do { \
                S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
                S3("u", "Intr pktcnt:",
                   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+               RL("RxAN:", stats.an);
+               RL("RxNoMem:", stats.nomem);
+
        } else if (ctrl_idx < ctrl_entries) {
                const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
                int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2077,6 +2409,8 @@ do { \
                T("TxQ inuse:", q.in_use);
                T("TxQ CIDX:", q.cidx);
                T("TxQ PIDX:", q.pidx);
+               TL("TxQFull:", q.stops);
+               TL("TxQRestarts:", q.restarts);
        } else if (fq_idx == 0) {
                const struct sge_rspq *evtq = &adap->sge.fw_evtq;
 
@@ -2092,10 +2426,14 @@ do { \
                           adap->sge.counter_val[evtq->pktcnt_idx]);
        }
 #undef R
+#undef RL
 #undef T
+#undef TL
 #undef S
+#undef R3
+#undef T3
 #undef S3
-return 0;
+       return 0;
 }
 
 static int sge_queue_entries(const struct adapter *adap)
@@ -2212,6 +2550,73 @@ static const struct file_operations mem_debugfs_fops = {
        .llseek  = default_llseek,
 };
 
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adap = seq->private;
+       const struct tid_info *t = &adap->tids;
+       enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+       if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+               unsigned int sb;
+
+               if (chip <= CHELSIO_T5)
+                       sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+               else
+                       sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+               if (sb) {
+                       seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+                                  adap->tids.hash_base,
+                                  t->ntids - 1);
+                       seq_printf(seq, ", in use: %u/%u\n",
+                                  atomic_read(&t->tids_in_use),
+                                  atomic_read(&t->hash_tids_in_use));
+               } else if (adap->flags & FW_OFLD_CONN) {
+                       seq_printf(seq, "TID range: %u..%u/%u..%u",
+                                  t->aftid_base,
+                                  t->aftid_end,
+                                  adap->tids.hash_base,
+                                  t->ntids - 1);
+                       seq_printf(seq, ", in use: %u/%u\n",
+                                  atomic_read(&t->tids_in_use),
+                                  atomic_read(&t->hash_tids_in_use));
+               } else {
+                       seq_printf(seq, "TID range: %u..%u",
+                                  adap->tids.hash_base,
+                                  t->ntids - 1);
+                       seq_printf(seq, ", in use: %u\n",
+                                  atomic_read(&t->hash_tids_in_use));
+               }
+       } else if (t->ntids) {
+               seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+               seq_printf(seq, ", in use: %u\n",
+                          atomic_read(&t->tids_in_use));
+       }
+
+       if (t->nstids)
+               seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+                          (!t->stid_base &&
+                          (chip <= CHELSIO_T5)) ?
+                          t->stid_base + 1 : t->stid_base,
+                          t->stid_base + t->nstids - 1, t->stids_in_use);
+       if (t->natids)
+               seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+                          t->natids - 1, t->atids_in_use);
+       seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+                  t->ftid_base + t->nftids - 1);
+       if (t->nsftids)
+               seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+                          t->sftid_base, t->sftid_base + t->nsftids - 2,
+                          t->sftids_in_use);
+       if (t->ntids)
+               seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+                          t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+                          t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+       return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
 static void add_debugfs_mem(struct adapter *adap, const char *name,
                            unsigned int idx, unsigned int size_mb)
 {
@@ -2596,6 +3001,10 @@ int t4_setup_debugfs(struct adapter *adap)
                { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
                { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
                { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
+               { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+               { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+               { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+               { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
                { "l2t", &t4_l2t_fops, S_IRUSR, 0},
                { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
                { "rss", &rss_debugfs_fops, S_IRUSR, 0 },
@@ -2625,6 +3034,7 @@ int t4_setup_debugfs(struct adapter *adap)
 #if IS_ENABLED(CONFIG_IPV6)
                { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+               { "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
                { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
                { "meminfo", &meminfo_fops, S_IRUSR, 0 },
        };
@@ -2665,16 +3075,19 @@ int t4_setup_debugfs(struct adapter *adap)
                                        EXT_MEM1_SIZE_G(size));
                }
        } else {
-               if (i & EXT_MEM_ENABLE_F)
+               if (i & EXT_MEM_ENABLE_F) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
                        add_debugfs_mem(adap, "mc", MEM_MC,
                                        EXT_MEM_SIZE_G(size));
+               }
        }
 
        de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
                                      &flash_debugfs_fops, adap->params.sf_size);
        debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
                            adap->debugfs_root, &adap->use_bd);
+       debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+                           adap->debugfs_root, &adap->trace_rss);
 
        return 0;
 }
index 27e87b6baa455cc41c85510ce6a54d23d3cae8e7..f35dd2284d4057ae3516bb715920457db9849dfe 100644 (file)
@@ -1548,7 +1548,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
                t->stid_tab[stid].data = data;
                stid -= t->nstids;
                stid += t->sftid_base;
-               t->stids_in_use++;
+               t->sftids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
@@ -1573,10 +1573,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
        else
                bitmap_release_region(t->stid_bmap, stid, 2);
        t->stid_tab[stid].data = NULL;
-       if (family == PF_INET)
-               t->stids_in_use--;
-       else
-               t->stids_in_use -= 4;
+       if (stid < t->nstids) {
+               if (family == PF_INET)
+                       t->stids_in_use--;
+               else
+                       t->stids_in_use -= 4;
+       } else {
+               t->sftids_in_use--;
+       }
        spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1658,25 @@ static void process_tid_release_list(struct work_struct *work)
  */
 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
 {
-       void *old;
        struct sk_buff *skb;
        struct adapter *adap = container_of(t, struct adapter, tids);
 
-       old = t->tid_tab[tid];
+       WARN_ON(tid >= t->ntids);
+
+       if (t->tid_tab[tid]) {
+               t->tid_tab[tid] = NULL;
+               if (t->hash_base && (tid >= t->hash_base))
+                       atomic_dec(&t->hash_tids_in_use);
+               else
+                       atomic_dec(&t->tids_in_use);
+       }
+
        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
        if (likely(skb)) {
-               t->tid_tab[tid] = NULL;
                mk_tid_release(skb, chan, tid);
                t4_ofld_send(adap, skb);
        } else
                cxgb4_queue_tid_release(t, chan, tid);
-       if (old)
-               atomic_dec(&t->tids_in_use);
 }
 EXPORT_SYMBOL(cxgb4_remove_tid);
 
@@ -1702,9 +1711,11 @@ static int tid_init(struct tid_info *t)
        spin_lock_init(&t->atid_lock);
 
        t->stids_in_use = 0;
+       t->sftids_in_use = 0;
        t->afree = NULL;
        t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);
+       atomic_set(&t->hash_tids_in_use, 0);
 
        /* Setup the free list for atid_tab and clear the stid bitmap. */
        if (natids) {
@@ -4814,6 +4825,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                adapter->params.offload = 0;
        }
 
+       if (is_offload(adapter)) {
+               if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+                       u32 hash_base, hash_reg;
+
+                       if (chip <= CHELSIO_T5) {
+                               hash_reg = LE_DB_TID_HASHBASE_A;
+                               hash_base = t4_read_reg(adapter, hash_reg);
+                               adapter->tids.hash_base = hash_base / 4;
+                       } else {
+                               hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+                               hash_base = t4_read_reg(adapter, hash_reg);
+                               adapter->tids.hash_base = hash_base;
+                       }
+               }
+       }
+
        /* See what interrupts we'll be using */
        if (msi > 1 && enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
index b27897d4f787896e0934aa1f126ddf76cdda5aa8..c3a8be5541e7cfdbc55f41b81f12917d8561b2a4 100644 (file)
@@ -96,6 +96,7 @@ struct tid_info {
        unsigned long *stid_bmap;
        unsigned int nstids;
        unsigned int stid_base;
+       unsigned int hash_base;
 
        union aopen_entry *atid_tab;
        unsigned int natids;
@@ -116,8 +117,12 @@ struct tid_info {
 
        spinlock_t stid_lock;
        unsigned int stids_in_use;
+       unsigned int sftids_in_use;
 
+       /* TIDs in the TCAM */
        atomic_t tids_in_use;
+       /* TIDs in the HASH */
+       atomic_t hash_tids_in_use;
 };
 
 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
                                    unsigned int tid)
 {
        t->tid_tab[tid] = data;
-       atomic_inc(&t->tids_in_use);
+       if (t->hash_base && (tid >= t->hash_base))
+               atomic_inc(&t->hash_tids_in_use);
+       else
+               atomic_inc(&t->tids_in_use);
 }
 
 int cxgb4_alloc_atid(struct tid_info *t, void *data);
index d4248d74f5601b711c1d0c9d65923ec7acce627c..78f446c58422ecd0ec0794f1f41ba403e6292290 100644 (file)
@@ -1424,18 +1424,17 @@ static void restart_ctrlq(unsigned long data)
                struct fw_wr_hdr *wr;
                unsigned int ndesc = skb->priority;     /* previously saved */
 
-               /*
-                * Write descriptors and free skbs outside the lock to limit
+               written += ndesc;
+               /* Write descriptors and free skbs outside the lock to limit
                 * wait times.  q->full is still set so new skbs will be queued.
                 */
+               wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+               txq_advance(&q->q, ndesc);
                spin_unlock(&q->sendq.lock);
 
-               wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
                inline_tx_skb(skb, &q->q, wr);
                kfree_skb(skb);
 
-               written += ndesc;
-               txq_advance(&q->q, ndesc);
                if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
                        unsigned long old = q->q.stops;
 
index 91750ad580ae8fa66063d1ac7c4df9657b4e6ed1..ac368efe2862ac22d19e7da92f8ff1f7e0b0a8cf 100644 (file)
@@ -4264,6 +4264,119 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
        }
 }
 
+/**
+ *     t4_set_trace_filter - configure one of the tracing filters
+ *     @adap: the adapter
+ *     @tp: the desired trace filter parameters
+ *     @idx: which filter to configure
+ *     @enable: whether to enable or disable the filter
+ *
+ *     Configures one of the tracing filters available in HW.  If @enable is
+ *     %0, @tp is not examined and may be %NULL.  The caller is responsible
+ *     for selecting single/multiple trace mode by writing to the
+ *     MPS_TRC_CFG_A register.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+                       int idx, int enable)
+{
+       int i, ofst = idx * 4;
+       u32 data_reg, mask_reg, cfg;
+       u32 multitrc = TRCMULTIFILTER_F;
+
+       if (!enable) {
+               t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+               return 0;
+       }
+
+       cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+       if (cfg & TRCMULTIFILTER_F) {
+               /* If multiple tracers are enabled, the maximum capture size
+                * is 2.5KB (the FIFO size of a single channel) minus 2 flits
+                * for the CPL_TRACE_PKT header.
+                */
+               if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+                       return -EINVAL;
+       } else {
+               /* If multiple tracers are disabled, a maximum packet capture
+                * size of 9600 bytes is recommended to avoid deadlocks.
+                * In this mode only trace0 can be enabled.
+                */
+               multitrc = 0;
+               if (tp->snap_len > 9600 || idx)
+                       return -EINVAL;
+       }
+
+       if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+           tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+           tp->min_len > TFMINPKTSIZE_M)
+               return -EINVAL;
+
+       /* stop the tracer we'll be changing */
+       t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+       idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+       data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+       mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+       for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+               t4_write_reg(adap, data_reg, tp->data[i]);
+               t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+       }
+       t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+                    TFCAPTUREMAX_V(tp->snap_len) |
+                    TFMINPKTSIZE_V(tp->min_len));
+       t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+                    TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+                    (is_t4(adap->params.chip) ?
+                    TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+                    T5_TFPORT_V(tp->port) | T5_TFEN_F |
+                    T5_TFINVERTMATCH_V(tp->invert)));
+
+       return 0;
+}
+
+/**
+ *     t4_get_trace_filter - query one of the tracing filters
+ *     @adap: the adapter
+ *     @tp: the current trace filter parameters
+ *     @idx: which trace filter to query
+ *     @enabled: non-zero if the filter is enabled
+ *
+ *     Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+                        int *enabled)
+{
+       u32 ctla, ctlb;
+       int i, ofst = idx * 4;
+       u32 data_reg, mask_reg;
+
+       ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+       ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+       if (is_t4(adap->params.chip)) {
+               *enabled = !!(ctla & TFEN_F);
+               tp->port =  TFPORT_G(ctla);
+               tp->invert = !!(ctla & TFINVERTMATCH_F);
+       } else {
+               *enabled = !!(ctla & T5_TFEN_F);
+               tp->port = T5_TFPORT_G(ctla);
+               tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+       }
+       tp->snap_len = TFCAPTUREMAX_G(ctlb);
+       tp->min_len = TFMINPKTSIZE_G(ctlb);
+       tp->skip_ofst = TFOFFSET_G(ctla);
+       tp->skip_len = TFLENGTH_G(ctla);
+
+       ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+       data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+       mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+       for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+               tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+               tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+       }
+}
+
 /**
  *     t4_pmtx_get_stats - returns the HW stats from PMTX
  *     @adap: the adapter
index e444dc4ebbd8fd11ec177a3f7d90d9d8930b18fb..fc3044c8ac1c9a5e824c02208c63294b6a0ec056 100644 (file)
 #define TRCMULTIFILTER_F    TRCMULTIFILTER_V(1U)
 
 #define MPS_TRC_RSS_CONTROL_A          0x9808
+#define MPS_TRC_FILTER1_RSS_CONTROL_A  0x9ff4
+#define MPS_TRC_FILTER2_RSS_CONTROL_A  0x9ffc
+#define MPS_TRC_FILTER3_RSS_CONTROL_A  0xa004
 #define MPS_T5_TRC_RSS_CONTROL_A       0xa00c
 
 #define RSSCONTROL_S    16
 #define QUEUENUMBER_S    0
 #define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
 
+#define TFINVERTMATCH_S    24
+#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S)
+#define TFINVERTMATCH_F    TFINVERTMATCH_V(1U)
+
+#define TFEN_S    22
+#define TFEN_V(x) ((x) << TFEN_S)
+#define TFEN_F    TFEN_V(1U)
+
+#define TFPORT_S    18
+#define TFPORT_M    0xfU
+#define TFPORT_V(x) ((x) << TFPORT_S)
+#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M)
+
+#define TFLENGTH_S    8
+#define TFLENGTH_M    0x1fU
+#define TFLENGTH_V(x) ((x) << TFLENGTH_S)
+#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M)
+
+#define TFOFFSET_S    0
+#define TFOFFSET_M    0x1fU
+#define TFOFFSET_V(x) ((x) << TFOFFSET_S)
+#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M)
+
+#define T5_TFINVERTMATCH_S    25
+#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S)
+#define T5_TFINVERTMATCH_F    T5_TFINVERTMATCH_V(1U)
+
+#define T5_TFEN_S    23
+#define T5_TFEN_V(x) ((x) << T5_TFEN_S)
+#define T5_TFEN_F    T5_TFEN_V(1U)
+
+#define T5_TFPORT_S    18
+#define T5_TFPORT_M    0x1fU
+#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S)
+#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810
+#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820
+
+#define TFMINPKTSIZE_S    16
+#define TFMINPKTSIZE_M    0x1ffU
+#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S)
+#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M)
+
+#define TFCAPTUREMAX_S    0
+#define TFCAPTUREMAX_M    0x3fffU
+#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S)
+#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M)
+
+#define MPS_TRC_FILTER0_MATCH_A 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80
+#define MPS_TRC_FILTER1_MATCH_A 0x9d00
+
 #define TP_RSS_CONFIG_A 0x7df0
 
 #define TNL4TUPENIPV6_S    31
 #define T6_LIPMISS_F    T6_LIPMISS_V(1U)
 
 #define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
 #define LE_DB_HASH_TID_BASE_A 0x19c30
 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
 #define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
 
 #define HASHEN_S    20
 #define HASHEN_V(x) ((x) << HASHEN_S)
index 84b6a2b46aec474959c69e84288386dc1d499282..8b53f7d4bebf33075f7f891bd3ceafc717fb6208 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.83"
+#define DRV_VERSION            "2.3.0.12"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
@@ -191,6 +191,25 @@ struct enic {
        struct vnic_gen_stats gen_stats;
 };
 
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+       struct enic *enic = vdev->priv;
+
+       return enic->netdev;
+}
+
+/* Wrapper macros for kernel logging.
+ * A variable named vdev of type struct vnic_dev * must be in scope wherever
+ * these macros are used.
+ */
+#define vdev_info(args...)     dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...)     dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...)      dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...)  netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...)  netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...)   netdev_err(vnic_get_netdev(vdev), args)
+
 static inline struct device *enic_get_dev(struct enic *enic)
 {
        return &(enic->pdev->dev);
index 8f646e4e968b329ab53dcb70af3c733e7788661f..3352d027ab895c59195ef79bd322112964c21c75 100644 (file)
@@ -2484,6 +2484,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_iounmap;
        }
 
+       err = vnic_devcmd_init(enic->vdev);
+
+       if (err)
+               goto err_out_vnic_unregister;
+
 #ifdef CONFIG_PCI_IOV
        /* Get number of subvnics */
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -2658,8 +2663,8 @@ err_out_disable_sriov_pp:
                pci_disable_sriov(pdev);
                enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
        }
-err_out_vnic_unregister:
 #endif
+err_out_vnic_unregister:
        vnic_dev_unregister(enic->vdev);
 err_out_iounmap:
        enic_iounmap(enic);
index 0daa1c7073cb008fb79d774adb5c35c9c1a69bde..abeda2a9ea273745f532f0a17732ed8630898bb4 100644 (file)
@@ -24,6 +24,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
+#include "enic.h"
 
 void vnic_cq_free(struct vnic_cq *cq)
 {
@@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
 
        cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
        if (!cq->ctrl) {
-               pr_err("Failed to hook CQ[%d] resource\n", index);
+               vdev_err("Failed to hook CQ[%d] resource\n", index);
                return -EINVAL;
        }
 
index 62f7b7baf93cd79f77de9aceb8d977461c075cb7..19a49a6e3911b8f3308de52dae87eea44b2eabc0 100644 (file)
 #include "vnic_resource.h"
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
+#include "vnic_wq.h"
 #include "vnic_stats.h"
-
-enum vnic_proxy_type {
-       PROXY_NONE,
-       PROXY_BY_BDF,
-       PROXY_BY_INDEX,
-};
-
-struct vnic_res {
-       void __iomem *vaddr;
-       dma_addr_t bus_addr;
-       unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
-       u32 mul;
-       u32 div;
-       u32 max_usec;
-};
-
-struct vnic_dev {
-       void *priv;
-       struct pci_dev *pdev;
-       struct vnic_res res[RES_TYPE_MAX];
-       enum vnic_dev_intr_mode intr_mode;
-       struct vnic_devcmd __iomem *devcmd;
-       struct vnic_devcmd_notify *notify;
-       struct vnic_devcmd_notify notify_copy;
-       dma_addr_t notify_pa;
-       u32 notify_sz;
-       dma_addr_t linkstatus_pa;
-       struct vnic_stats *stats;
-       dma_addr_t stats_pa;
-       struct vnic_devcmd_fw_info *fw_info;
-       dma_addr_t fw_info_pa;
-       enum vnic_proxy_type proxy;
-       u32 proxy_index;
-       u64 args[VNIC_DEVCMD_NARGS];
-       struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
 
 #define VNIC_MAX_RES_HDR_SIZE \
        (sizeof(struct vnic_resource_header) + \
@@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
                return -EINVAL;
 
        if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
-               pr_err("vNIC BAR0 res hdr length error\n");
+               vdev_err("vNIC BAR0 res hdr length error\n");
                return -EINVAL;
        }
 
        rh  = bar->vaddr;
        mrh = bar->vaddr;
        if (!rh) {
-               pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+               vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
                return -EINVAL;
        }
 
@@ -106,11 +69,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
                (ioread32(&rh->version) != VNIC_RES_VERSION)) {
                if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
                        (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
-                       pr_err("vNIC BAR0 res magic/version error "
-                       "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
-                       VNIC_RES_MAGIC, VNIC_RES_VERSION,
-                       MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
-                       ioread32(&rh->magic), ioread32(&rh->version));
+                       vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+                                VNIC_RES_MAGIC, VNIC_RES_VERSION,
+                                MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+                                ioread32(&rh->magic), ioread32(&rh->version));
                        return -EINVAL;
                }
        }
@@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
                        /* each count is stride bytes long */
                        len = count * VNIC_RES_STRIDE;
                        if (len + bar_offset > bar[bar_num].len) {
-                               pr_err("vNIC BAR0 resource %d "
-                                       "out-of-bounds, offset 0x%x + "
-                                       "size 0x%x > bar len 0x%lx\n",
-                                       type, bar_offset,
-                                       len,
-                                       bar[bar_num].len);
+                               vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+                                        type, bar_offset, len,
+                                        bar[bar_num].len);
                                return -EINVAL;
                        }
                        break;
                case RES_TYPE_INTR_PBA_LEGACY:
                case RES_TYPE_DEVCMD:
+               case RES_TYPE_DEVCMD2:
                        len = count;
                        break;
                default:
@@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
                &ring->base_addr_unaligned);
 
        if (!ring->descs_unaligned) {
-               pr_err("Failed to allocate ring (size=%d), aborting\n",
-                       (int)ring->size);
+               vdev_err("Failed to allocate ring (size=%d), aborting\n",
+                        (int)ring->size);
                return -ENOMEM;
        }
 
@@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                return -ENODEV;
        }
        if (status & STAT_BUSY) {
-               pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+               vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
                return -EBUSY;
        }
 
@@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                                        return -err;
                                if (err != ERR_ECMDUNKNOWN ||
                                    cmd != CMD_CAPABILITY)
-                                       pr_err("Error %d devcmd %d\n",
-                                               err, _CMD_N(cmd));
+                                       vdev_neterr("Error %d devcmd %d\n",
+                                                   err, _CMD_N(cmd));
                                return -err;
                        }
 
@@ -330,10 +290,160 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                }
        }
 
-       pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+       vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd));
        return -ETIMEDOUT;
 }
 
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+                         int wait)
+{
+       struct devcmd2_controller *dc2c = vdev->devcmd2;
+       struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+       unsigned int i;
+       int delay, err;
+       u32 fetch_index, posted, new_posted;
+
+       posted = ioread32(&dc2c->wq_ctrl->posted_index);
+       fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+       if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF)
+               return -ENODEV;
+
+       new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+       if (new_posted == fetch_index) {
+               vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+                           _CMD_N(cmd), fetch_index, posted);
+               return -EBUSY;
+       }
+       dc2c->cmd_ring[posted].cmd = cmd;
+       dc2c->cmd_ring[posted].flags = 0;
+
+       if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+               dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+       if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+               for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+                       dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+       /* A write memory barrier prevents the compiler and/or CPU from
+        * reordering stores, so the descriptor is fully initialized before
+        * it is posted.  Without it, hardware could read stale descriptor
+        * fields.
+        */
+       wmb();
+       iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+
+       if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+               return 0;
+
+       for (delay = 0; delay < wait; delay++) {
+               if (result->color == dc2c->color) {
+                       dc2c->next_result++;
+                       if (dc2c->next_result == dc2c->result_size) {
+                               dc2c->next_result = 0;
+                               dc2c->color = dc2c->color ? 0 : 1;
+                       }
+                       if (result->error) {
+                               err = result->error;
+                               if (err != ERR_ECMDUNKNOWN ||
+                                   cmd != CMD_CAPABILITY)
+                                       vdev_neterr("Error %d devcmd %d\n",
+                                                   err, _CMD_N(cmd));
+                               return -err;
+                       }
+                       if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+                               for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+                                       vdev->args[i] = result->results[i];
+
+                       return 0;
+               }
+               udelay(100);
+       }
+
+       vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+       return -ETIMEDOUT;
+}
+
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+       vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+       if (!vdev->devcmd)
+               return -ENODEV;
+       vdev->devcmd_rtn = _vnic_dev_cmd;
+
+       return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+       int err;
+       unsigned int fetch_index;
+
+       if (vdev->devcmd2)
+               return 0;
+
+       vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+       if (!vdev->devcmd2)
+               return -ENOMEM;
+
+       vdev->devcmd2->color = 1;
+       vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+       err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+                                   DEVCMD2_DESC_SIZE);
+       if (err)
+               goto err_free_devcmd2;
+
+       fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+       if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
+               vdev_err("Fatal error in devcmd2 init - hardware surprise removal");
+
+               return -ENODEV;
+       }
+
+       enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+                          0);
+       vnic_wq_enable(&vdev->devcmd2->wq);
+
+       err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+                                      DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+       if (err)
+               goto err_free_wq;
+
+       vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+       vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+       vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+       vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+                       VNIC_PADDR_TARGET;
+       vdev->args[1] = DEVCMD2_RING_SIZE;
+
+       err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+       if (err)
+               goto err_free_desc_ring;
+
+       vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+       return 0;
+
+err_free_desc_ring:
+       vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+       vnic_wq_disable(&vdev->devcmd2->wq);
+       vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+       kfree(vdev->devcmd2);
+       vdev->devcmd2 = NULL;
+
+       return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+       vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+       vnic_wq_disable(&vdev->devcmd2->wq);
+       vnic_wq_free(&vdev->devcmd2->wq);
+       kfree(vdev->devcmd2);
+}
+
 static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
        enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait)
@@ -348,7 +458,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
        vdev->args[2] = *a0;
        vdev->args[3] = *a1;
 
-       err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+       err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
        if (err)
                return err;
 
@@ -357,7 +467,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
                err = (int)vdev->args[1];
                if (err != ERR_ECMDUNKNOWN ||
                    cmd != CMD_CAPABILITY)
-                       pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+                       vdev_neterr("Error %d proxy devcmd %d\n", err,
+                                   _CMD_N(cmd));
                return err;
        }
 
@@ -375,7 +486,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
        vdev->args[0] = *a0;
        vdev->args[1] = *a1;
 
-       err = _vnic_dev_cmd(vdev, cmd, wait);
+       err = vdev->devcmd_rtn(vdev, cmd, wait);
 
        *a0 = vdev->args[0];
        *a1 = vdev->args[1];
@@ -650,7 +761,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
 
        err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
        if (err)
-               pr_err("Can't set packet filter\n");
+               vdev_neterr("Can't set packet filter\n");
 
        return err;
 }
@@ -667,7 +778,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
 
        err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
        if (err)
-               pr_err("Can't add addr [%pM], %d\n", addr, err);
+               vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
 
        return err;
 }
@@ -684,7 +795,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
 
        err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
        if (err)
-               pr_err("Can't del addr [%pM], %d\n", addr, err);
+               vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
 
        return err;
 }
@@ -728,7 +839,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
        dma_addr_t notify_pa;
 
        if (vdev->notify || vdev->notify_pa) {
-               pr_err("notify block %p still allocated", vdev->notify);
+               vdev_neterr("notify block %p still allocated", vdev->notify);
                return -EINVAL;
        }
 
@@ -838,7 +949,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
        memset(vdev->args, 0, sizeof(vdev->args));
 
        if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
-               err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+               err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
        else
                err = ERR_ECMDUNKNOWN;
 
@@ -847,7 +958,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
         */
        if ((err == ERR_ECMDUNKNOWN) ||
                (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
-               pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+               vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
                vnic_dev_intr_coal_timer_info_default(vdev);
                return 0;
        }
@@ -938,6 +1049,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
                        pci_free_consistent(vdev->pdev,
                                sizeof(struct vnic_devcmd_fw_info),
                                vdev->fw_info, vdev->fw_info_pa);
+               if (vdev->devcmd2)
+                       vnic_dev_deinit_devcmd2(vdev);
+
                kfree(vdev);
        }
 }
@@ -959,10 +1073,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
        if (vnic_dev_discover_res(vdev, bar, num_bars))
                goto err_out;
 
-       vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
-       if (!vdev->devcmd)
-               goto err_out;
-
        return vdev;
 
 err_out:
@@ -977,6 +1087,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
 }
 EXPORT_SYMBOL(vnic_dev_get_pdev);
 
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+       void __iomem *res;
+       int err;
+
+       res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+       if (res) {
+               err = vnic_dev_init_devcmd2(vdev);
+               if (err)
+                       vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1",
+                                 err);
+               else
+                       return 0;
+       } else {
+               vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
+       }
+       err = vnic_dev_init_devcmd1(vdev);
+       if (err)
+               vdev_err("DEVCMD1 initialization failed: %d", err);
+
+       return err;
+}
+
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
        u64 a0, a1 = len;
index 1fb214efcebaf0bb959dc62a0871f792cd2b4e43..b013b6a78e8772a9bdc077e0c0bd136092307193 100644 (file)
@@ -70,7 +70,48 @@ struct vnic_dev_ring {
        unsigned int desc_avail;
 };
 
-struct vnic_dev;
+enum vnic_proxy_type {
+       PROXY_NONE,
+       PROXY_BY_BDF,
+       PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+       void __iomem *vaddr;
+       dma_addr_t bus_addr;
+       unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+       u32 mul;
+       u32 div;
+       u32 max_usec;
+};
+
+struct vnic_dev {
+       void *priv;
+       struct pci_dev *pdev;
+       struct vnic_res res[RES_TYPE_MAX];
+       enum vnic_dev_intr_mode intr_mode;
+       struct vnic_devcmd __iomem *devcmd;
+       struct vnic_devcmd_notify *notify;
+       struct vnic_devcmd_notify notify_copy;
+       dma_addr_t notify_pa;
+       u32 notify_sz;
+       dma_addr_t linkstatus_pa;
+       struct vnic_stats *stats;
+       dma_addr_t stats_pa;
+       struct vnic_devcmd_fw_info *fw_info;
+       dma_addr_t fw_info_pa;
+       enum vnic_proxy_type proxy;
+       u32 proxy_index;
+       u64 args[VNIC_DEVCMD_NARGS];
+       struct vnic_intr_coal_timer_info intr_coal_timer_info;
+       struct devcmd2_controller *devcmd2;
+       int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+                         int wait);
+};
+
 struct vnic_stats;
 
 void *vnic_dev_priv(struct vnic_dev *vdev);
@@ -135,5 +176,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
                        struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
 
 #endif /* _VNIC_DEV_H_ */
index 435d0cd96c224c5c8b6a5c8db6498d41458b8cf6..2a812880b884f35e8ebc51d971be3639c8f71c74 100644 (file)
@@ -365,6 +365,12 @@ enum vnic_devcmd_cmd {
         */
        CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
 
+       /* Initialization for the devcmd2 interface.
+        * in: (u64) a0 = host result buffer physical address
+        * in: (u16) a1 = number of entries in result buffer
+        */
+       CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
        /* Add a filter.
         * in: (u64) a0= filter address
         *     (u32) a1= size of filter
@@ -629,4 +635,26 @@ struct vnic_devcmd {
        u64 args[VNIC_DEVCMD_NARGS];    /* RW cmd args (little-endian) */
 };
 
+#define DEVCMD2_FNORESULT      0x1     /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS     VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+       u16 pad;
+       u16 flags;
+       u32 cmd;
+       u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS  VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+       u64 results[VNIC_DEVCMD2_NRESULTS];
+       u32 pad;
+       u16 completed_index;
+       u8  error;
+       u8  color;
+};
+
+#define DEVCMD2_RING_SIZE      32
+#define DEVCMD2_DESC_SIZE      128
+
 #endif /* _VNIC_DEVCMD_H_ */
index 0ca107f7bc8ca33851e7c26339d4fa48ee0fced4..942759d9cb3c4f4a932839fb8e9ea028a6f4a459 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_intr.h"
+#include "enic.h"
 
 void vnic_intr_free(struct vnic_intr *intr)
 {
@@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
 
        intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
        if (!intr->ctrl) {
-               pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+               vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
                return -EINVAL;
        }
 
index e0a73f1ca6f43e3b66558edb933cb9a9105aa5bb..4e45f88ac1d4e322ec80dcac4c33399fbf31189c 100644 (file)
@@ -48,6 +48,13 @@ enum vnic_res_type {
        RES_TYPE_RSVD7,
        RES_TYPE_DEVCMD,                /* Device command region */
        RES_TYPE_PASS_THRU_PAGE,        /* Pass-thru page */
+       RES_TYPE_SUBVNIC,               /* subvnic resource type */
+       RES_TYPE_MQ_WQ,                 /* MQ Work queues */
+       RES_TYPE_MQ_RQ,                 /* MQ Receive queues */
+       RES_TYPE_MQ_CQ,                 /* MQ Completion queues */
+       RES_TYPE_DEPRECATED1,           /* Old version of devcmd 2 */
+       RES_TYPE_DEPRECATED2,           /* Old version of devcmd 2 */
+       RES_TYPE_DEVCMD2,               /* Device control region */
 
        RES_TYPE_MAX,                   /* Count of resource types */
 };
index c4b2183bf352fb2a1881001777df91857c2d1f79..cce2777dfc415dc1da33b2e4866127edd095f90c 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_rq.h"
+#include "enic.h"
 
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
@@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
 
        rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
        if (!rq->ctrl) {
-               pr_err("Failed to hook RQ[%d] resource\n", index);
+               vdev_err("Failed to hook RQ[%d] resource\n", index);
                return -EINVAL;
        }
 
@@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq)
 int vnic_rq_disable(struct vnic_rq *rq)
 {
        unsigned int wait;
+       struct vnic_dev *vdev = rq->vdev;
 
        iowrite32(0, &rq->ctrl->enable);
 
@@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
                udelay(10);
        }
 
-       pr_err("Failed to disable RQ[%d]\n", rq->index);
+       vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
 
        return -ETIMEDOUT;
 }
index b5a1c937fad2fb321336340b91b3358b551cf95e..05ad16a7e872054b1c6d2e3a4a87235e4ef2c29f 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_wq.h"
+#include "enic.h"
 
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
@@ -94,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
 
        wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
        if (!wq->ctrl) {
-               pr_err("Failed to hook WQ[%d] resource\n", index);
+               vdev_err("Failed to hook WQ[%d] resource\n", index);
                return -EINVAL;
        }
 
@@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
        return 0;
 }
 
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
-       unsigned int fetch_index, unsigned int posted_index,
-       unsigned int error_interrupt_enable,
-       unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+                         unsigned int desc_count, unsigned int desc_size)
+{
+       int err;
+
+       wq->index = 0;
+       wq->vdev = vdev;
+
+       wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+       if (!wq->ctrl)
+               return -EINVAL;
+       vnic_wq_disable(wq);
+       err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+       return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+                       unsigned int fetch_index, unsigned int posted_index,
+                       unsigned int error_interrupt_enable,
+                       unsigned int error_interrupt_offset)
 {
        u64 paddr;
        unsigned int count = wq->ring.desc_count;
@@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
 {
-       vnic_wq_init_start(wq, cq_index, 0, 0,
+       enic_wq_init_start(wq, cq_index, 0, 0,
                error_interrupt_enable,
                error_interrupt_offset);
 }
@@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq)
 int vnic_wq_disable(struct vnic_wq *wq)
 {
        unsigned int wait;
+       struct vnic_dev *vdev = wq->vdev;
 
        iowrite32(0, &wq->ctrl->enable);
 
@@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
                udelay(10);
        }
 
-       pr_err("Failed to disable WQ[%d]\n", wq->index);
+       vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
 
        return -ETIMEDOUT;
 }
index 296154351823e2ebc30a205f72d2437bf86d0c30..8944af935a6078831a39c5145f35cc9af300a3fd 100644 (file)
@@ -88,6 +88,17 @@ struct vnic_wq {
        unsigned int pkts_outstanding;
 };
 
+struct devcmd2_controller {
+       struct vnic_wq_ctrl __iomem *wq_ctrl;
+       struct vnic_devcmd2 *cmd_ring;
+       struct devcmd2_result *result;
+       u16 next_result;
+       u16 result_size;
+       int color;
+       struct vnic_dev_ring results_ring;
+       struct vnic_wq wq;
+};
+
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
 {
        /* how many does SW own? */
@@ -174,5 +185,11 @@ void vnic_wq_enable(struct vnic_wq *wq);
 int vnic_wq_disable(struct vnic_wq *wq);
 void vnic_wq_clean(struct vnic_wq *wq,
        void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+                         unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+                       unsigned int fetch_index, unsigned int posted_index,
+                       unsigned int error_interrupt_enable,
+                       unsigned int error_interrupt_offset);
 
 #endif /* _VNIC_WQ_H_ */
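
The color field in devcmd2_controller exists because the results ring has no hardware-maintained head pointer: the producer flips a color bit on every pass around the ring, so a fresh completion is one whose color matches what software currently expects. A hedged sketch of that consumption logic (types and sizes invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4

    struct result_entry {
        uint32_t status;
        uint8_t  color;                 /* toggled by the producer each pass */
    };

    struct result_ring {
        struct result_entry e[RING_SIZE];
        uint16_t next;                  /* like next_result */
        uint8_t  color;                 /* color expected for a fresh entry */
    };

    /* Returns 1 and fills *status if a fresh completion is present. */
    static int ring_poll(struct result_ring *r, uint32_t *status)
    {
        struct result_entry *e = &r->e[r->next];

        if (e->color != r->color)
            return 0;                   /* stale entry from the previous pass */

        *status = e->status;
        if (++r->next == RING_SIZE) {   /* wrap: expect the flipped color */
            r->next = 0;
            r->color ^= 1;
        }
        return 1;
    }

    int main(void)
    {
        struct result_ring r = { .color = 1 };
        uint32_t status;

        r.e[0] = (struct result_entry){ .status = 42, .color = 1 };
        printf("%d\n", ring_poll(&r, &status));   /* 1: entry 0 is fresh */
        printf("%d\n", ring_poll(&r, &status));   /* 0: entry 1 still stale */
        return 0;
    }
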
index 36d835bd5f3c06f86020e4c192c02cf074bf2340..7d178bdb112eb7d14d5a62d74ea21ecaa30ba0e9 100644 (file)
@@ -620,6 +620,11 @@ enum be_if_flags {
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                                         BE_IF_FLAGS_MCAST_PROMISCUOUS)
 
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+                       BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS   (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
index d86bc5d5224627a812ba0a430c21f7a4f23513b3..12687bf52b9518eaa1c4bb538ff26fcc88ce7acc 100644 (file)
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;
 
+       /* if device is not running, copy MAC to netdev->dev_addr */
+       if (!netif_running(netdev))
+               goto done;
+
        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                status = -EPERM;
                goto err;
        }
-
-       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
 err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
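
The change to be_mac_addr_set() above makes MAC programming conditional on the interface being up: when it is down, the address is only recorded in netdev->dev_addr, and be_open() programs it into hardware later via be_enable_if_filters(). A small sketch of that defer-until-open shape (set_hw_mac() is a made-up stand-in for the PMAC_ADD command):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct dev {
        bool running;
        unsigned char addr[6];          /* software copy, like dev_addr */
    };

    static int set_hw_mac(const unsigned char *mac)
    {
        printf("hw programmed: %02x:%02x:...\n", mac[0], mac[1]);
        return 0;                       /* may fail, e.g. missing privilege */
    }

    static int dev_set_mac(struct dev *d, const unsigned char *mac)
    {
        if (d->running) {
            int err = set_hw_mac(mac);  /* program immediately when up */
            if (err)
                return err;
        }
        /* When down, just record it; open() programs the stored copy. */
        memcpy(d->addr, mac, sizeof(d->addr));
        return 0;
    }

    static int dev_open(struct dev *d)
    {
        d->running = true;
        return set_hw_mac(d->addr);     /* the deferred programming */
    }

    int main(void)
    {
        struct dev d = { .running = false };
        const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        dev_set_mac(&d, mac);           /* down: stored only */
        return dev_open(&d);            /* up: hardware gets the address */
    }
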
@@ -2452,10 +2456,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
 {
-       struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
+       struct be_rx_page_info *page_info;
+
+       while (atomic_read(&rxq->used) > 0) {
+               page_info = get_rx_page_info(rxo);
+               put_page(page_info->page);
+               memset(page_info, 0, sizeof(*page_info));
+       }
+       BUG_ON(atomic_read(&rxq->used));
+       rxq->tail = 0;
+       rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        struct be_adapter *adapter = rxo->adapter;
@@ -2492,16 +2510,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
        /* After cleanup, leave the CQ in unarmed state */
        be_cq_notify(adapter, rx_cq->id, false, 0);
-
-       /* Then free posted rx buffers that were not used */
-       while (atomic_read(&rxq->used) > 0) {
-               page_info = get_rx_page_info(rxo);
-               put_page(page_info->page);
-               memset(page_info, 0, sizeof(*page_info));
-       }
-       BUG_ON(atomic_read(&rxq->used));
-       rxq->tail = 0;
-       rxq->head = 0;
 }
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2581,8 +2589,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
                        napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
+                       free_cpumask_var(eqo->affinity_mask);
                }
-               free_cpumask_var(eqo->affinity_mask);
                be_queue_free(adapter, &eqo->q);
        }
 }
@@ -2599,13 +2607,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 
        for_all_evt_queues(adapter, eqo, i) {
                int numa_node = dev_to_node(&adapter->pdev->dev);
-               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
-                       return -ENOMEM;
-               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
-                               eqo->affinity_mask);
-               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
-                              BE_NAPI_WEIGHT);
-               napi_hash_add(&eqo->napi);
+
                aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->idx = i;
@@ -2621,6 +2623,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                rc = be_cmd_eq_create(adapter, eqo);
                if (rc)
                        return rc;
+
+               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+                       return -ENOMEM;
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
+               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+                              BE_NAPI_WEIGHT);
+               napi_hash_add(&eqo->napi);
        }
        return 0;
 }
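
Note the new ordering in be_evt_queues_create(): the cpumask allocation and NAPI registration now run only after be_cmd_eq_create() succeeds, so a failed firmware command returns before any per-queue software resources exist for the error path to unwind. A sketch of that "fallible step first, local allocations second" ordering (names hypothetical):

    #include <errno.h>
    #include <stdlib.h>

    struct evtq { void *affinity_mask; };

    static int hw_create_eq(struct evtq *q) { (void)q; return 0; } /* may fail */

    static int evtq_setup(struct evtq *q)
    {
        int rc = hw_create_eq(q);       /* do the failure-prone step first ... */
        if (rc)
            return rc;                  /* ... so there is nothing to undo yet */

        q->affinity_mask = calloc(1, 64);
        if (!q->affinity_mask)
            return -ENOMEM;             /* caller's teardown handles created EQs */
        return 0;
    }

    int main(void)
    {
        struct evtq q = { 0 };
        int rc = evtq_setup(&q);

        free(q.affinity_mask);
        return rc ? 1 : 0;
    }
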
@@ -3359,13 +3369,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
+                       /* If RXQs are destroyed while in an "out of buffer"
+                        * state, there is a possibility of an HW stall on
+                        * Lancer. So, post 64 buffers to each queue to relieve
+                        * the "out of buffer" condition.
+                        * Make sure there's space in the RXQ before posting.
+                        */
+                       if (lancer_chip(adapter)) {
+                               be_rx_cq_clean(rxo);
+                               if (atomic_read(&q->used) == 0)
+                                       be_post_rx_frags(rxo, GFP_KERNEL,
+                                                        MAX_RX_POST);
+                       }
+
                        be_cmd_rxq_destroy(adapter, q);
                        be_rx_cq_clean(rxo);
+                       be_rxq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
 }
 
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+       be_cmd_pmac_del(adapter, adapter->if_handle,
+                       adapter->pmac_id[0], 0);
+
+       be_clear_uc_list(adapter);
+
+       /* The IFACE flags are enabled in the open path and cleared
+        * in the close path. When a VF gets detached from the host and
+        * assigned to a VM, the following happens:
+        *      - VF's IFACE flags get cleared in the detach path
+        *      - IFACE create is issued by the VF in the attach path
+        * Due to a bug in the BE3/Skyhawk-R FW
+        * (Lancer FW doesn't have the bug), the IFACE capability flags
+        * specified along with the IFACE create cmd issued by a VF are not
+        * honoured by FW.  As a consequence, if a *new* driver
+        * (that enables/disables IFACE flags in open/close)
+        * is loaded in the host and an *old* driver is used by a VM/VF,
+        * the IFACE gets created *without* the needed flags.
+        * To avoid this, disable RX-filter flags only for Lancer.
+        */
+       if (lancer_chip(adapter)) {
+               be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+               adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+       }
+}
+
 static int be_close(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -3378,6 +3429,8 @@ static int be_close(struct net_device *netdev)
        if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
                return 0;
 
+       be_disable_if_filters(adapter);
+
        be_roce_dev_close(adapter);
 
        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3397,7 +3450,6 @@ static int be_close(struct net_device *netdev)
        be_tx_compl_clean(adapter);
 
        be_rx_qs_destroy(adapter);
-       be_clear_uc_list(adapter);
 
        for_all_evt_queues(adapter, eqo, i) {
                if (msix_enabled(adapter))
@@ -3482,6 +3534,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        return 0;
 }
 
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+       if (status)
+               return status;
+
+       /* For BE3 VFs, the PF programs the initial MAC address */
+       if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+               status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+                                        adapter->if_handle,
+                                        &adapter->pmac_id[0], 0);
+               if (status)
+                       return status;
+       }
+
+       if (adapter->vlans_added)
+               be_vid_config(adapter);
+
+       be_set_rx_mode(adapter->netdev);
+
+       return 0;
+}
+
 static int be_open(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -3495,6 +3572,10 @@ static int be_open(struct net_device *netdev)
        if (status)
                goto err;
 
+       status = be_enable_if_filters(adapter);
+       if (status)
+               goto err;
+
        status = be_irq_register(adapter);
        if (status)
                goto err;
@@ -3685,16 +3766,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
        }
 }
 
-static void be_mac_clear(struct be_adapter *adapter)
-{
-       if (adapter->pmac_id) {
-               be_cmd_pmac_del(adapter, adapter->if_handle,
-                               adapter->pmac_id[0], 0);
-               kfree(adapter->pmac_id);
-               adapter->pmac_id = NULL;
-       }
-}
-
 #ifdef CONFIG_BE2NET_VXLAN
 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 {
@@ -3769,8 +3840,8 @@ static int be_clear(struct be_adapter *adapter)
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
 #endif
-       /* delete the primary mac along with the uc-mac list */
-       be_mac_clear(adapter);
+       kfree(adapter->pmac_id);
+       adapter->pmac_id = NULL;
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
 
@@ -3781,25 +3852,11 @@ static int be_clear(struct be_adapter *adapter)
        return 0;
 }
 
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
-                       u32 cap_flags, u32 vf)
-{
-       u32 en_flags;
-
-       en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                  BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
-                  BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
-       en_flags &= cap_flags;
-
-       return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
 static int be_vfs_if_create(struct be_adapter *adapter)
 {
        struct be_resources res = {0};
+       u32 cap_flags, en_flags, vf;
        struct be_vf_cfg *vf_cfg;
-       u32 cap_flags, vf;
        int status;
 
        /* If a FW profile exists, then cap_flags are updated */
@@ -3820,8 +3877,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
                        }
                }
 
-               status = be_if_create(adapter, &vf_cfg->if_handle,
-                                     cap_flags, vf + 1);
+               en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+                                       BE_IF_FLAGS_BROADCAST |
+                                       BE_IF_FLAGS_MULTICAST |
+                                       BE_IF_FLAGS_PASS_L3L4_ERRORS);
+               status = be_cmd_if_create(adapter, cap_flags, en_flags,
+                                         &vf_cfg->if_handle, vf + 1);
                if (status)
                        return status;
        }
@@ -4193,15 +4254,8 @@ static int be_mac_setup(struct be_adapter *adapter)
 
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-       } else {
-               /* Maybe the HW was reset; dev_addr must be re-programmed */
-               memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
        }
 
-       /* For BE3-R VFs, the PF programs the initial MAC address */
-       if (!(BEx_chip(adapter) && be_virtfn(adapter)))
-               be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-                               &adapter->pmac_id[0], 0);
        return 0;
 }
 
@@ -4341,6 +4395,7 @@ static int be_func_init(struct be_adapter *adapter)
 static int be_setup(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 en_flags;
        int status;
 
        status = be_func_init(adapter);
@@ -4363,8 +4418,11 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       status = be_if_create(adapter, &adapter->if_handle,
-                             be_if_cap_flags(adapter), 0);
+       /* will enable all the needed filter flags in be_open() */
+       en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+       en_flags = en_flags & be_if_cap_flags(adapter);
+       status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+                                 &adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -4390,11 +4448,6 @@ static int be_setup(struct be_adapter *adapter)
                dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
        }
 
-       if (adapter->vlans_added)
-               be_vid_config(adapter);
-
-       be_set_rx_mode(adapter->netdev);
-
        status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                         adapter->rx_fc);
        if (status)
@@ -5120,7 +5173,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
        struct device *dev = &adapter->pdev->dev;
        int status;
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5167,7 +5220,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->vxlan_port != port)
index 24a85b292007c9e2b9e662e70b5036ae17c969d5..63c2bcf8031a8a8cc39c714bb1510e3f3ae03793 100644 (file)
@@ -150,6 +150,9 @@ static void nps_enet_tx_handler(struct net_device *ndev)
        if (!priv->tx_packet_sent || tx_ctrl.ct)
                return;
 
+       /* Ack Tx ctrl register */
+       nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
        /* Check Tx transmit error */
        if (unlikely(tx_ctrl.et)) {
                ndev->stats.tx_errors++;
@@ -158,11 +161,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
                ndev->stats.tx_bytes += tx_ctrl.nt;
        }
 
-       if (priv->tx_skb) {
-               dev_kfree_skb(priv->tx_skb);
-               priv->tx_skb = NULL;
-       }
-
+       dev_kfree_skb(priv->tx_skb);
        priv->tx_packet_sent = false;
 
        if (netif_queue_stopped(ndev))
@@ -180,15 +179,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
        struct nps_enet_priv *priv = netdev_priv(ndev);
-       struct nps_enet_buf_int_enable buf_int_enable;
        u32 work_done;
 
-       buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
-       buf_int_enable.tx_done = NPS_ENET_ENABLE;
        nps_enet_tx_handler(ndev);
        work_done = nps_enet_rx_handler(ndev);
        if (work_done < budget) {
+               struct nps_enet_buf_int_enable buf_int_enable;
+
                napi_complete(napi);
+               buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+               buf_int_enable.tx_done = NPS_ENET_ENABLE;
                nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
                                 buf_int_enable.value);
        }
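
This hunk also tightens the standard NAPI contract: interrupts stay masked while a poll consumes its full budget, and they are re-armed only after napi_complete(), which is why the buf_int_enable write now sits after the completion. A generic sketch of that interrupt-mitigation loop (device accessors hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 5;             /* simulated frames waiting in RX */

    static void irq_enable(void) { }    /* stand-in for the int-enable write */

    static int process_rx(int budget)   /* returns frames handled */
    {
        int n = pending < budget ? pending : budget;
        pending -= n;
        return n;
    }

    /* NAPI-style poll: re-enable interrupts only after an under-budget
     * poll, i.e. only once the queue has been drained. */
    static int poll(int budget, bool *keep_polling)
    {
        int done = process_rx(budget);

        if (done < budget) {
            *keep_polling = false;      /* napi_complete() */
            irq_enable();               /* re-arm only now */
        } else {
            *keep_polling = true;       /* stay in polled mode */
        }
        return done;
    }

    int main(void)
    {
        bool more = true;

        while (more)
            printf("handled %d\n", poll(4, &more));
        return pending;                 /* 0 once drained */
    }
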
@@ -211,12 +211,13 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
 {
        struct net_device *ndev = dev_instance;
        struct nps_enet_priv *priv = netdev_priv(ndev);
-       struct nps_enet_buf_int_cause buf_int_cause;
+       struct nps_enet_rx_ctl rx_ctrl;
+       struct nps_enet_tx_ctl tx_ctrl;
 
-       buf_int_cause.value =
-                       nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+       rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+       tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
 
-       if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+       if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
                if (likely(napi_schedule_prep(&priv->napi))) {
                        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        __napi_schedule(&priv->napi);
@@ -307,11 +308,8 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
 
        /* Discard Packets bigger than max frame length */
        max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
-       if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+       if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
                ge_mac_cfg_3->max_len = max_frame_length;
-               nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
-                                ge_mac_cfg_3->value);
-       }
 
        /* Enable interrupts */
        buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
@@ -339,11 +337,14 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
        ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
        ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
        ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+       ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
 
        /* Enable Rx and Tx */
        ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
        ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
 
+       nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+                        ge_mac_cfg_3->value);
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
                         ge_mac_cfg_0.value);
 }
@@ -527,10 +528,10 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
        /* This driver handles one frame at a time  */
        netif_stop_queue(ndev);
 
-       nps_enet_send_frame(ndev, skb);
-
        priv->tx_skb = skb;
 
+       nps_enet_send_frame(ndev, skb);
+
        return NETDEV_TX_OK;
 }
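
The swap of the two statements above closes a race: once nps_enet_send_frame() kicks the hardware, the TX-done interrupt may run immediately, and the completion path frees priv->tx_skb, so the pointer must be published before the kick. A sketch of the publish-before-trigger ordering (the driver gets the needed visibility from the MMIO write; no extra barrier is shown here):

    #include <stdio.h>

    struct priv {
        void *tx_skb;                   /* consumed by the completion path */
    };

    static void hw_kick(void) { }       /* MMIO write that starts the TX */

    /* Completion handler; would run from IRQ context in the driver. */
    static void tx_done(struct priv *p)
    {
        printf("freeing skb %p\n", p->tx_skb);  /* safe: published earlier */
        p->tx_skb = NULL;
    }

    static void start_xmit(struct priv *p, void *skb)
    {
        p->tx_skb = skb;                /* publish first ... */
        hw_kick();                      /* ... then let the device run */
    }

    int main(void)
    {
        struct priv p = { 0 };
        int frame;

        start_xmit(&p, &frame);
        tx_done(&p);                    /* simulate the interrupt */
        return 0;
    }
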
 
index fc45c9daa1c2dfb6310e210f49f1958f666cd28a..6703674d679c964c00cac0ea12e084267df1ee47 100644 (file)
@@ -36,7 +36,6 @@
 #define NPS_ENET_REG_RX_CTL            0x810
 #define NPS_ENET_REG_RX_BUF            0x818
 #define NPS_ENET_REG_BUF_INT_ENABLE    0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE     0x8C4
 #define NPS_ENET_REG_GE_MAC_CFG_0      0x1000
 #define NPS_ENET_REG_GE_MAC_CFG_1      0x1004
 #define NPS_ENET_REG_GE_MAC_CFG_2      0x1008
@@ -108,25 +107,6 @@ struct nps_enet_buf_int_enable {
        };
 };
 
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
-       union {
-               /* tx_done: Interrupt in the case when current frame was
-                *          read from TX buffer.
-                * rx_rdy:  Interrupt in the case when new frame is ready
-                *          in RX buffer.
-                */
-               struct {
-                       u32
-                       __reserved:30,
-                       tx_done:1,
-                       rx_rdy:1;
-               };
-
-               u32 value;
-       };
-};
-
 /* Gbps Eth MAC Configuration 0 register */
 struct nps_enet_ge_mac_cfg_0 {
        union {
index 32e3807c650ea7256b09f0c9e406a8219cb2bb78..787da8e54e9937eb50b48102a29f6606c90250d2 100644 (file)
@@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
        return 0;
 }
 
-static int
+static struct bufdesc *
 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                             struct sk_buff *skb,
                             struct net_device *ndev)
@@ -439,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                bdp->cbd_sc = status;
        }
 
-       txq->cur_tx = bdp;
-
-       return 0;
-
+       return bdp;
 dma_mapping_error:
        bdp = txq->cur_tx;
        for (i = 0; i < frag; i++) {
@@ -450,7 +447,7 @@ dma_mapping_error:
                dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                bdp->cbd_datlen, DMA_TO_DEVICE);
        }
-       return NETDEV_TX_OK;
+       return ERR_PTR(-ENOMEM);
 }
 
 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
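
fec_enet_txq_submit_frag_skb() now returns either a valid descriptor pointer or an encoded errno, the kernel's ERR_PTR convention: error codes live in the top page of the pointer range, so one return value covers both outcomes and the caller tests it with IS_ERR(). A stand-alone sketch of the encoding (the real macros are in include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Simplified ERR_PTR()/IS_ERR()/PTR_ERR(). */
    static void *err_ptr(long error)    { return (void *)error; }
    static int   is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }
    static long  ptr_err(const void *p) { return (long)p; }

    struct bufdesc { int dummy; };

    static struct bufdesc *submit_frags(int fail)
    {
        static struct bufdesc last;

        if (fail)
            return err_ptr(-ENOMEM);    /* error and pointer share a return */
        return &last;
    }

    int main(void)
    {
        struct bufdesc *bdp = submit_frags(1);

        if (is_err(bdp))
            printf("submit failed: %ld\n", ptr_err(bdp));  /* prints -12 */
        return 0;
    }
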
@@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
        unsigned int estatus = 0;
        unsigned int index;
        int entries_free;
-       int ret;
 
        entries_free = fec_enet_get_free_txdesc_num(fep, txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
        /* Fill in a Tx ring entry */
        bdp = txq->cur_tx;
+       last_bdp = bdp;
        status = bdp->cbd_sc;
        status &= ~BD_ENET_TX_STATS;
 
@@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
        }
 
        if (nr_frags) {
-               ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
-               if (ret)
-                       return ret;
+               last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+               if (IS_ERR(last_bdp))
+                       return NETDEV_TX_OK;
        } else {
                status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
                if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                ebdp->cbd_esc = estatus;
        }
 
-       last_bdp = txq->cur_tx;
        index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
        skb_tx_timestamp(skb);
 
+       /* Make sure the updates to bdp and tx_skbuff are performed before
+        * cur_tx.
+        */
+       wmb();
        txq->cur_tx = bdp;
 
        /* Trigger transmission start */
@@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
        /* get next bdp of dirty_tx */
        bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
-       while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
-               /* current queue is empty */
-               if (bdp == txq->cur_tx)
+       while (bdp != READ_ONCE(txq->cur_tx)) {
+               /* Order the load of cur_tx and cbd_sc */
+               rmb();
+               status = READ_ONCE(bdp->cbd_sc);
+               if (status & BD_ENET_TX_READY)
                        break;
 
                index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
 
+               /* Make sure the updates to bdp and tx_skbuff are performed
+                * before dirty_tx
+                */
+               wmb();
                txq->dirty_tx = bdp;
 
                /* Update pointer to next buffer descriptor to be transmitted */
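
The wmb()/rmb() pair added in this file is the classic lock-free producer/consumer protocol: the xmit path orders its descriptor and tx_skbuff writes before publishing cur_tx, and the cleanup path orders its read of cur_tx before looking at descriptor contents. A portable sketch of the same guarantee using C11 release/acquire atomics:

    #include <stdatomic.h>
    #include <stddef.h>

    #define RING 64

    struct desc { int payload; };

    static struct desc ring[RING];
    static _Atomic size_t cur_tx;       /* the published producer index */

    /* Producer: fill the slot, then publish with release ordering
     * (the driver's wmb() before "txq->cur_tx = bdp"). */
    static void produce(int value)
    {
        size_t i = atomic_load_explicit(&cur_tx, memory_order_relaxed);

        ring[i % RING].payload = value;
        atomic_store_explicit(&cur_tx, i + 1, memory_order_release);
    }

    /* Consumer: read the index with acquire ordering (the driver's
     * rmb() after reading cur_tx), then the slot contents are safe. */
    static int consume(size_t *next, int *out)
    {
        size_t head = atomic_load_explicit(&cur_tx, memory_order_acquire);

        if (*next == head)
            return 0;                   /* ring empty */
        *out = ring[*next % RING].payload;
        (*next)++;
        return 1;
    }

    int main(void)
    {
        size_t next = 0;
        int v = 0;

        produce(7);
        return (consume(&next, &v) && v == 7) ? 0 : 1;
    }
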
@@ -1774,7 +1779,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
        int ret = 0;
 
        ret = pm_runtime_get_sync(dev);
-       if (IS_ERR_VALUE(ret))
+       if (ret < 0)
                return ret;
 
        fep->mii_timeout = 0;
@@ -1813,7 +1818,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        int ret = 0;
 
        ret = pm_runtime_get_sync(dev);
-       if (IS_ERR_VALUE(ret))
+       if (ret < 0)
                return ret;
 
        fep->mii_timeout = 0;
@@ -2865,7 +2870,7 @@ fec_enet_open(struct net_device *ndev)
        int ret;
 
        ret = pm_runtime_get_sync(&fep->pdev->dev);
-       if (IS_ERR_VALUE(ret))
+       if (ret < 0)
                return ret;
 
        pinctrl_pm_select_default_state(&fep->pdev->dev);
@@ -3433,6 +3438,7 @@ fec_probe(struct platform_device *pdev)
 
        pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
index 56316db6c5a674fd1d17ef20e8db3747950cf71e..cf8e54652df95266e24a5d6d64ed67c00336f67b 100644 (file)
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        frag = skb_shinfo(skb)->frags;
        while (nr_frags) {
                CBDC_SC(bdp,
-                       BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+                       BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+                       BD_ENET_TX_TC);
                CBDS_SC(bdp, BD_ENET_TX_READY);
 
                if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
index b34214e2df5f6e55bdbb29e6a8016c139e74345c..016743e355de31984d57904e802d69e6703ea5e4 100644 (file)
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
 }
 
 #define FEC_NAPI_RX_EVENT_MSK  (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF)
 #define FEC_RX_EVENT           (FEC_ENET_RXF)
 #define FEC_TX_EVENT           (FEC_ENET_TXF)
 #define FEC_ERR_EVENT_MSK      (FEC_ENET_HBERR | FEC_ENET_BABR | \
index 087ffcdc48a312d365ffb24ee4f7c16ddcf18edb..4b69d061d90f7983fb0ee4929b7f6074922d3690 100644 (file)
@@ -2067,6 +2067,11 @@ int startup_gfar(struct net_device *ndev)
        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
 
+       /* force link state update after mac reset */
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
        phy_start(priv->phydev);
 
        enable_napi(priv);
index 555e461b0cfe272e9944f594e2a584faef242f03..6bdc89179b72d6487d031ecda8551453b9268cd4 100644 (file)
@@ -902,27 +902,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
        return 0;
 }
 
-static int gfar_comp_asc(const void *a, const void *b)
-{
-       return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
-       return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
-       u32 *_a = a;
-       u32 *_b = b;
-
-       swap(_a[0], _b[0]);
-       swap(_a[1], _b[1]);
-       swap(_a[2], _b[2]);
-       swap(_a[3], _b[3]);
-}
-
 /* Write a mask to filer cache */
 static void gfar_set_mask(u32 mask, struct filer_table *tab)
 {
@@ -1272,310 +1251,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
        return 0;
 }
 
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
-                                   struct gfar_filer_entry src[0], s32 size)
-{
-       while (size > 0) {
-               size--;
-               dst[size].ctrl = src[size].ctrl;
-               dst[size].prop = src[size].prop;
-       }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
-       int length;
-
-       if (end > MAX_FILER_CACHE_IDX || end < begin)
-               return -EINVAL;
-
-       end++;
-       length = end - begin;
-
-       /* Copy */
-       while (end < tab->index) {
-               tab->fe[begin].ctrl = tab->fe[end].ctrl;
-               tab->fe[begin++].prop = tab->fe[end++].prop;
-
-       }
-       /* Fill up with don't cares */
-       while (begin < tab->index) {
-               tab->fe[begin].ctrl = 0x60;
-               tab->fe[begin].prop = 0xFFFFFFFF;
-               begin++;
-       }
-
-       tab->index -= length;
-       return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
-                                    struct filer_table *tab)
-{
-       if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
-           begin > MAX_FILER_CACHE_IDX)
-               return -EINVAL;
-
-       gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
-                               tab->index - length + 1);
-
-       tab->index += length;
-       return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
-       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-            start++) {
-               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-                   (RQFCR_AND | RQFCR_CLE))
-                       return start;
-       }
-       return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
-       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-            start++) {
-               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-                   (RQFCR_CLE))
-                       return start;
-       }
-       return -1;
-}
-
-/* Uses hardwares clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
-       s32 i = -1, j, iend, jend;
-
-       while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
-               j = i;
-               while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
-                       /* The cluster entries self and the previous one
-                        * (a mask) must be identical!
-                        */
-                       if (tab->fe[i].ctrl != tab->fe[j].ctrl)
-                               break;
-                       if (tab->fe[i].prop != tab->fe[j].prop)
-                               break;
-                       if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
-                               break;
-                       if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
-                               break;
-                       iend = gfar_get_next_cluster_end(i, tab);
-                       jend = gfar_get_next_cluster_end(j, tab);
-                       if (jend == -1 || iend == -1)
-                               break;
-
-                       /* First we make some free space, where our cluster
-                        * element should be. Then we copy it there and finally
-                        * delete in from its old location.
-                        */
-                       if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
-                           -EINVAL)
-                               break;
-
-                       gfar_copy_filer_entries(&(tab->fe[iend + 1]),
-                                               &(tab->fe[jend + 1]), jend - j);
-
-                       if (gfar_trim_filer_entries(jend - 1,
-                                                   jend + (jend - j),
-                                                   tab) == -EINVAL)
-                               return;
-
-                       /* Mask out cluster bit */
-                       tab->fe[iend].ctrl &= ~(RQFCR_CLE);
-               }
-       }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
-                          struct gfar_filer_entry *a2,
-                          struct gfar_filer_entry *b1,
-                          struct gfar_filer_entry *b2, u32 mask)
-{
-       u32 temp[4];
-       temp[0] = a1->ctrl & mask;
-       temp[1] = a2->ctrl & mask;
-       temp[2] = b1->ctrl & mask;
-       temp[3] = b2->ctrl & mask;
-
-       a1->ctrl &= ~mask;
-       a2->ctrl &= ~mask;
-       b1->ctrl &= ~mask;
-       b2->ctrl &= ~mask;
-
-       a1->ctrl |= temp[1];
-       a2->ctrl |= temp[0];
-       b1->ctrl |= temp[3];
-       b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
-                                   struct filer_table *tab)
-{
-       u32 i, and_index = 0, block_index = 1;
-
-       for (i = 0; i < tab->index; i++) {
-
-               /* LSByte of control = 0 sets a mask */
-               if (!(tab->fe[i].ctrl & 0xF)) {
-                       mask_table[and_index].mask = tab->fe[i].prop;
-                       mask_table[and_index].start = i;
-                       mask_table[and_index].block = block_index;
-                       if (and_index >= 1)
-                               mask_table[and_index - 1].end = i - 1;
-                       and_index++;
-               }
-               /* cluster starts and ends will be separated because they should
-                * hold their position
-                */
-               if (tab->fe[i].ctrl & RQFCR_CLE)
-                       block_index++;
-               /* A not set AND indicates the end of a depended block */
-               if (!(tab->fe[i].ctrl & RQFCR_AND))
-                       block_index++;
-       }
-
-       mask_table[and_index - 1].end = i - 1;
-
-       return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
-                                struct filer_table *temp_table, u32 and_index)
-{
-       /* Pointer to compare function (_asc or _desc) */
-       int (*gfar_comp)(const void *, const void *);
-
-       u32 i, size = 0, start = 0, prev = 1;
-       u32 old_first, old_last, new_first, new_last;
-
-       gfar_comp = &gfar_comp_desc;
-
-       for (i = 0; i < and_index; i++) {
-               if (prev != mask_table[i].block) {
-                       old_first = mask_table[start].start + 1;
-                       old_last = mask_table[i - 1].end;
-                       sort(mask_table + start, size,
-                            sizeof(struct gfar_mask_entry),
-                            gfar_comp, &gfar_swap);
-
-                       /* Toggle order for every block. This makes the
-                        * thing more efficient!
-                        */
-                       if (gfar_comp == gfar_comp_desc)
-                               gfar_comp = &gfar_comp_asc;
-                       else
-                               gfar_comp = &gfar_comp_desc;
-
-                       new_first = mask_table[start].start + 1;
-                       new_last = mask_table[i - 1].end;
-
-                       gfar_swap_bits(&temp_table->fe[new_first],
-                                      &temp_table->fe[old_first],
-                                      &temp_table->fe[new_last],
-                                      &temp_table->fe[old_last],
-                                      RQFCR_QUEUE | RQFCR_CLE |
-                                      RQFCR_RJE | RQFCR_AND);
-
-                       start = i;
-                       size = 0;
-               }
-               size++;
-               prev = mask_table[i].block;
-       }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a depended block. A depended block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
-       struct filer_table *temp_table;
-       struct gfar_mask_entry *mask_table;
-
-       u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
-       s32 ret = 0;
-
-       /* We need a copy of the filer table because
-        * we want to change its order
-        */
-       temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
-       if (temp_table == NULL)
-               return -ENOMEM;
-
-       mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
-                            sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
-       if (mask_table == NULL) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       and_index = gfar_generate_mask_table(mask_table, tab);
-
-       gfar_sort_mask_table(mask_table, temp_table, and_index);
-
-       /* Now we can copy the data from our duplicated filer table to
-        * the real one in the order the mask table says
-        */
-       for (i = 0; i < and_index; i++) {
-               size = mask_table[i].end - mask_table[i].start + 1;
-               gfar_copy_filer_entries(&(tab->fe[j]),
-                               &(temp_table->fe[mask_table[i].start]), size);
-               j += size;
-       }
-
-       /* And finally we just have to check for duplicated masks and drop the
-        * second ones
-        */
-       for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-               if (tab->fe[i].ctrl == 0x80) {
-                       previous_mask = i++;
-                       break;
-               }
-       }
-       for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-               if (tab->fe[i].ctrl == 0x80) {
-                       if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
-                               /* Two identical ones found!
-                                * So drop the second one!
-                                */
-                               gfar_trim_filer_entries(i, i, tab);
-                       } else
-                               /* Not identical! */
-                               previous_mask = i;
-               }
-       }
-
-       kfree(mask_table);
-end:   kfree(temp_table);
-       return ret;
-}
-
 /* Write the bit-pattern from software's buffer to hardware registers */
 static int gfar_write_filer_table(struct gfar_private *priv,
                                  struct filer_table *tab)
@@ -1585,11 +1260,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
                return -EBUSY;
 
        /* Fill regular entries */
-       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
-            i++)
+       for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
 /* Fill the rest with fall-throughs */
-       for (; i < MAX_FILER_IDX - 1; i++)
+       for (; i < MAX_FILER_IDX; i++)
                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
        /* Last entry must be default accept
         * because that's what people expect
@@ -1623,7 +1297,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
 {
        struct ethtool_flow_spec_container *j;
        struct filer_table *tab;
-       s32 i = 0;
        s32 ret = 0;
 
        /* So index is set to zero, too! */
@@ -1648,17 +1321,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
                }
        }
 
-       i = tab->index;
-
-       /* Optimizations to save entries */
-       gfar_cluster_filer(tab);
-       gfar_optimize_filer_masks(tab);
-
-       pr_debug("\tSummary:\n"
-                "\tData on hardware: %d\n"
-                "\tCompression rate: %d%%\n",
-                tab->index, 100 - (100 * tab->index) / i);
-
        /* Write everything to hardware */
        ret = gfar_write_filer_table(priv, tab);
        if (ret == -EBUSY) {
@@ -1724,13 +1386,14 @@ static int gfar_add_cls(struct gfar_private *priv,
        }
 
 process:
+       priv->rx_list.count++;
        ret = gfar_process_filer_changes(priv);
        if (ret)
                goto clean_list;
-       priv->rx_list.count++;
        return ret;
 
 clean_list:
+       priv->rx_list.count--;
        list_del(&temp->list);
 clean_mem:
        kfree(temp);
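
The gfar_add_cls() fix moves the rx_list.count increment ahead of gfar_process_filer_changes(), since that pass rebuilds the filer table from the rule list with the new entry already on it; on failure both the count and the list entry are rolled back. A tiny sketch of the update-then-rollback shape:

    #include <errno.h>

    struct rule_list {
        int count;                      /* must match the list when processed */
    };

    static int process(const struct rule_list *l)
    {
        return l->count > 10 ? -E2BIG : 0;   /* stand-in; may reject */
    }

    static int add_rule(struct rule_list *l)
    {
        int ret;

        l->count++;                     /* make the new entry visible ... */
        ret = process(l);
        if (ret)
            l->count--;                 /* ... and roll back on rejection */
        return ret;
    }

    int main(void)
    {
        struct rule_list l = { .count = 0 };

        return add_rule(&l);
    }
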
index d2657a412768839145b57c656a2349cb750a146a..068789e694c9b310ca6fd541a2be90f6c5059e36 100644 (file)
@@ -1770,8 +1770,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
        dma_addr = pci_map_single(nic->pdev,
                                  skb->data, skb->len, PCI_DMA_TODEVICE);
        /* If we can't map the skb, have the upper layer try later */
-       if (pci_dma_mapping_error(nic->pdev, dma_addr))
+       if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+               dev_kfree_skb_any(skb);
+               skb = NULL;
                return -ENOMEM;
+       }
 
        /*
         * Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2967,6 +2970,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                           nic->params.cbs.max * sizeof(struct cb),
                           sizeof(u32),
                           0);
+       if (!nic->cbs_pool) {
+               netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+               err = -ENOMEM;
+               goto err_out_pool;
+       }
        netif_info(nic, probe, nic->netdev,
                   "addr 0x%llx, irq %d, MAC addr %pM\n",
                   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
@@ -2974,6 +2982,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+err_out_pool:
+       unregister_netdev(netdev);
 err_out_free:
        e100_free(nic);
 err_out_iounmap:
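
The new err_out_pool label slots into e100_probe()'s goto-based unwind chain: each label undoes only the steps that had already succeeded, in reverse order of acquisition. A compact sketch of the idiom:

    #include <errno.h>
    #include <stdlib.h>

    static int device_start(void) { return 0; }    /* stand-in; may fail */

    static int probe(void)
    {
        void *a, *b;
        int err;

        a = malloc(32);                 /* step 1 */
        if (!a)
            return -ENOMEM;

        b = malloc(32);                 /* step 2 */
        if (!b) {
            err = -ENOMEM;
            goto err_free_a;
        }

        err = device_start();           /* step 3 */
        if (err)
            goto err_free_b;            /* a later step gets a later label */
        return 0;

    err_free_b:                         /* unwind in reverse order */
        free(b);
    err_free_a:
        free(a);
        return err;
    }

    int main(void) { return probe() ? 1 : 0; }
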
index 26459853c6be2133917a69c3fdba66d890a00ef7..34c551e322ebd30a6b3dd4bf994f3df957228db6 100644 (file)
 #define E1000_FEXTNVM11_DISABLE_MULR_FIX       0x00002000
 
 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
-#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+#define E1000_RXDCTL_THRESH_UNIT_DESC  0x01000000
 
 #define K1_ENTRY_LATENCY       0
 #define K1_MIN_TIME            1
 #define NVM_SIZE_MULTIPLIER 4096       /* multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000   /* offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3       /* NVM valid sector */
-
+#define E1000_TARC0_CB_MULTIQ_3_REQ    (1 << 28 | 1 << 29)
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
index fea1601f32a3614122a75e03fd98e786cab2ac2a..faf4b3f3d0b53ed5ecbf17c87507bba0e45ff1df 100644 (file)
@@ -48,7 +48,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
-       writel(0, rx_ring->head);
-       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-               e1000e_update_rdt_wa(rx_ring, 0);
-       else
-               writel(0, rx_ring->tail);
 }
 
 static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       writel(0, tx_ring->head);
-       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-               e1000e_update_tdt_wa(tx_ring, 0);
-       else
-               writel(0, tx_ring->tail);
 }
 
 /**
@@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
        tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
+       writel(0, tx_ring->head);
+       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+               e1000e_update_tdt_wa(tx_ring, 0);
+       else
+               writel(0, tx_ring->tail);
+
        /* Set the Tx Interrupt Delay register */
        ew32(TIDV, adapter->tx_int_delay);
        /* Tx irq moderation */
@@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
        rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
        rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
+       writel(0, rx_ring->head);
+       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+               e1000e_update_rdt_wa(rx_ring, 0);
+       else
+               writel(0, rx_ring->tail);
+
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
        if (adapter->netdev->features & NETIF_F_RXCSUM)
@@ -4280,18 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
        struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
                                                     cc);
        struct e1000_hw *hw = &adapter->hw;
+       u32 systimel_1, systimel_2, systimeh;
        cycle_t systim, systim_next;
-       /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
-        * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
-        * to occur between reads, so if we read a vale close to overflow, we
-        * wait for overflow to occur and read both registers when its safe.
+       /* SYSTIMH latching upon SYSTIML read does not work well.
+        * This means that if SYSTIML overflows after we read it but before
+        * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+        * will experience a huge non-linear increment in the systime value.
+        * To fix that, we test for overflow and, if true, re-read systime.
         */
-       u32 systim_overflow_latch_fix = 0x3FFFFFFF;
-
-       do {
-               systim = (cycle_t)er32(SYSTIML);
-       } while (systim > systim_overflow_latch_fix);
-       systim |= (cycle_t)er32(SYSTIMH) << 32;
+       systimel_1 = er32(SYSTIML);
+       systimeh = er32(SYSTIMH);
+       systimel_2 = er32(SYSTIML);
+       /* Check for overflow. If there was no overflow, use the values */
+       if (systimel_1 < systimel_2) {
+               systim = (cycle_t)systimel_1;
+               systim |= (cycle_t)systimeh << 32;
+       } else {
+               /* There was an overflow, read again SYSTIMH, and use
+                * systimel_2
+                */
+               systimeh = er32(SYSTIMH);
+               systim = (cycle_t)systimel_2;
+               systim |= (cycle_t)systimeh << 32;
+       }
 
        if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
                u64 incvalue, time_delta, rem, temp;
@@ -6317,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
                        return retval;
        }
 
+       /* Ensure that the appropriate bits are set in LPI_CTRL
+        * for EEE in Sx
+        */
+       if ((hw->phy.type >= e1000_phy_i217) &&
+           adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
+               u16 lpi_ctrl = 0;
+
+               retval = hw->phy.ops.acquire(hw);
+               if (!retval) {
+                       retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
+                                                &lpi_ctrl);
+                       if (!retval) {
+                               if (adapter->eee_advert &
+                                   hw->dev_spec.ich8lan.eee_lp_ability &
+                                   I82579_EEE_100_SUPPORTED)
+                                       lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+                               if (adapter->eee_advert &
+                                   hw->dev_spec.ich8lan.eee_lp_ability &
+                                   I82579_EEE_1000_SUPPORTED)
+                                       lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+                               retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
+                                                        lpi_ctrl);
+                       }
+               }
+               hw->phy.ops.release(hw);
+       }
 
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
@@ -6466,7 +6504,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
                aspm_disable_flag |= PCIE_LINK_STATE_L1;
        if (aspm_disable_flag)
-               e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+               e1000e_disable_aspm(pdev, aspm_disable_flag);
 
        pci_set_master(pdev);
 
@@ -6744,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
                aspm_disable_flag |= PCIE_LINK_STATE_L1;
        if (aspm_disable_flag)
-               e1000e_disable_aspm(pdev, aspm_disable_flag);
+               e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
 
        err = pci_enable_device_mem(pdev);
        if (err) {
index b24e5fee17f2d5f7a514213e882bae67b43962c0..1d5e0b77062a0ea3341dafcab898bd8163bb105f 100644 (file)
@@ -38,8 +38,8 @@
 #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
-#define E1000_FEXTNVM9 0x5BB4  /* Future Extended NVM 9 - RW */
-#define E1000_FEXTNVM11        0x5BBC  /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM9 0x5BB4  /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11        0x5BBC  /* Future Extended NVM 11 - RW */
 #define E1000_PCIEANACFG       0x00F18 /* PCIE Analog Config */
 #define E1000_FCT      0x00030 /* Flow Control Type - RW */
 #define E1000_VET      0x00038 /* VLAN Ether Type - RW */
                                 (0x054E4 + ((_i - 16) * 8)))
 #define E1000_SHRAL(_i)                (0x05438 + ((_i) * 8))
 #define E1000_SHRAH(_i)                (0x0543C + ((_i) * 8))
-#define E1000_TARC0_CB_MULTIQ_3_REQ    (1 << 28 | 1 << 29)
 #define E1000_TDFH             0x03410 /* Tx Data FIFO Head - RW */
 #define E1000_TDFT             0x03418 /* Tx Data FIFO Tail - RW */
 #define E1000_TDFHS            0x03420 /* Tx Data FIFO Head Saved - RW */
index 281fd8456146190427a0390cedd3cfb2806d31af..0f97883c149300fdfb30825cb4731a74cd85b553 100644 (file)
 #define I40E_MIN_MSIX                 2
 #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
 #define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+               (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
-#define I40E_MAX_QUEUES_PER_TC        64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+               (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #ifdef I40E_FCOE
@@ -298,6 +301,7 @@ struct i40e_pf {
 #define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
 #define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
 #define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
+#define I40E_FLAG_IWARP_ENABLED                        BIT_ULL(10)
 #ifdef I40E_FCOE
 #define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
@@ -318,6 +322,12 @@ struct i40e_pf {
 #endif
 #define I40E_FLAG_PORT_ID_VALID                        BIT_ULL(28)
 #define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE               BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE         BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE       BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE           BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE            BIT_ULL(35)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE  BIT_ULL(38)
 #define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 
        /* tracks features that get auto disabled by errors */
@@ -550,6 +560,7 @@ struct i40e_q_vector {
        cpumask_t affinity_mask;
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[I40E_INT_NAME_STR_LEN];
+       bool arm_wb_state;
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
index 9101f5c00f37104dd993049e6a34e9103d5a627a..95d23bfbcbf11bbb17d9f6a133d1e0ed5ff44187 100644 (file)
@@ -257,6 +257,10 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
+       i40e_aqc_opc_set_rss_key        = 0x0B02,
+       i40e_aqc_opc_set_rss_lut        = 0x0B03,
+       i40e_aqc_opc_get_rss_key        = 0x0B04,
+       i40e_aqc_opc_get_rss_lut        = 0x0B05,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -821,8 +825,12 @@ struct i40e_aqc_vsi_properties_data {
                                         I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
        /* queueing option section */
        u8      queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA  0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA    0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA    0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA   0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI        0x40
        u8      queueing_opt_reserved[3];
        /* scheduler section */
        u8      up_enable_bits;
@@ -2179,6 +2187,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT      0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK       (0x3FF << \
+                                       I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+       __le16  vsi_id;
+       u8      reserved[6];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+       u8 standard_rss_key[0x28];
+       u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT      0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK       (0x3FF << \
+                                       I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+       __le16  vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT  0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK   (0x1 << \
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI    0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF     1
+       __le16  flags;
+       u8      reserved[4];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
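
Both new commands pack a 10-bit VSI index into bits 0-9 of vsi_id and set a valid flag in bit 15; this is exactly the shift/mask arithmetic i40e_aq_get_set_rss_lut() performs in the i40e_common.c hunk below. A standalone sketch of the packing (plain uint16_t in host order; the driver additionally byte-swaps with cpu_to_le16()):

#include <stdio.h>
#include <stdint.h>

#define I40E_AQC_SET_RSS_LUT_VSI_VALID    (0x1 << 15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK  \
        (0x3FF << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)

int main(void)
{
        uint16_t vsi_id = 0x2A5;        /* example firmware VSI index */
        uint16_t field;

        /* Keep the low 10 bits of the index and mark the field valid. */
        field = (vsi_id << I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
                I40E_AQC_SET_RSS_LUT_VSI_ID_MASK;
        field |= I40E_AQC_SET_RSS_LUT_VSI_VALID;

        printf("vsi_id field = 0x%04x\n", (unsigned)field);     /* 0x82a5 */
        return 0;
}
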
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure {
index 167ca0d752ea8065c04a731395029d40eb90a888..114dc6450183b37a136e48ad9763ed9fb4c68cbc 100644 (file)
@@ -54,6 +54,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_20G_KR2:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
+               case I40E_DEV_ID_SFP_X722:
+               case I40E_DEV_ID_1G_BASE_T_X722:
+               case I40E_DEV_ID_10G_BASE_T_X722:
+                       hw->mac.type = I40E_MAC_X722;
+                       break;
+               case I40E_DEV_ID_X722_VF:
+               case I40E_DEV_ID_X722_VF_HV:
+                       hw->mac.type = I40E_MAC_X722_VF;
+                       break;
                case I40E_DEV_ID_VF:
                case I40E_DEV_ID_VF_HV:
                        hw->mac.type = I40E_MAC_VF;
@@ -383,6 +392,170 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+                                          u16 vsi_id, bool pf_lut,
+                                          u8 *lut, u16 lut_size,
+                                          bool set)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_set_rss_lut *cmd_resp =
+                  (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+       if (set)
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_set_rss_lut);
+       else
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_get_rss_lut);
+
+       /* Indirect command */
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+       cmd_resp->vsi_id =
+                       cpu_to_le16((u16)((vsi_id <<
+                                         I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+                                         I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+       cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+       if (pf_lut)
+               cmd_resp->flags |= cpu_to_le16((u16)
+                                       ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+       else
+               cmd_resp->flags |= cpu_to_le16((u16)
+                                       ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+       cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+       cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+       status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+                               bool pf_lut, u8 *lut, u16 lut_size)
+{
+       return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+                                      false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+                               bool pf_lut, u8 *lut, u16 lut_size)
+{
+       return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+                                     u16 vsi_id,
+                                     struct i40e_aqc_get_set_rss_key_data *key,
+                                     bool set)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_set_rss_key *cmd_resp =
+                       (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+       u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+       if (set)
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_set_rss_key);
+       else
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_get_rss_key);
+
+       /* Indirect command */
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+       cmd_resp->vsi_id =
+                       cpu_to_le16((u16)((vsi_id <<
+                                         I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+                                         I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+       cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+       cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
+       cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+       status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+                               u16 vsi_id,
+                               struct i40e_aqc_get_set_rss_key_data *key)
+{
+       return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+                               u16 vsi_id,
+                               struct i40e_aqc_get_set_rss_key_data *key)
+{
+       return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
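
Taken together, the four exported wrappers give the rest of the driver a symmetric get/set API over AQ opcodes 0x0B02-0x0B05. A hedged sketch of the intended caller flow; the types and stub bodies below merely stand in for the real driver and AQ transport so the example is self-contained:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

typedef int32_t i40e_status;            /* stand-in */
struct i40e_hw { int unused; };         /* stand-in */
struct i40e_aqc_get_set_rss_key_data {
        uint8_t standard_rss_key[0x28];
        uint8_t extended_hash_key[0xc];
};

/* Stubbed transport; the real wrappers issue the AQ commands above. */
static i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, uint16_t vsi_id,
                                struct i40e_aqc_get_set_rss_key_data *key)
{
        (void)hw; (void)vsi_id; (void)key;
        return 0;
}

static i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, uint16_t vsi_id,
                                bool pf_lut, uint8_t *lut, uint16_t lut_size)
{
        (void)hw; (void)vsi_id; (void)pf_lut; (void)lut; (void)lut_size;
        return 0;
}

int main(void)
{
        struct i40e_hw hw = { 0 };
        struct i40e_aqc_get_set_rss_key_data key;
        uint8_t lut[64];
        unsigned int i, rss_size = 8;

        memset(&key, 0xA5, sizeof(key));        /* normally a random seed */
        /* Spread the queues over the table round-robin, as the driver does. */
        for (i = 0; i < sizeof(lut); i++)
                lut[i] = i % rss_size;

        if (!i40e_aq_set_rss_key(&hw, 1, &key) &&
            !i40e_aq_set_rss_lut(&hw, 1, false, lut, sizeof(lut)))
                printf("RSS key and 64-entry VSI LUT programmed\n");
        return 0;
}
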
+
 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
@@ -769,6 +942,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 
        switch (hw->mac.type) {
        case I40E_MAC_XL710:
+       case I40E_MAC_X722:
                break;
        default:
                return I40E_ERR_DEVICE_NOT_SUPPORTED;
index 857d294d2a453c3097a3ead5c7a5fa27df001ae4..3bb832a2ec51a6997a7fab7d832176ce9a606977 100644 (file)
@@ -76,6 +76,9 @@ static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
        /* required last entry */
        {0, }
 };
@@ -1547,7 +1550,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
         */
        qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
        num_tc_qps = qcount / numtc;
-       num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
+       num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
 
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -2905,6 +2908,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
              I40E_PFINT_ICR0_ENA_VFLR_MASK          |
              I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+               val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
        if (pf->flags & I40E_FLAG_PTP)
                val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 
@@ -3195,6 +3201,13 @@ static irqreturn_t i40e_intr(int irq, void *data)
            (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
                pf->sw_int_count++;
 
+       if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+           (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+               ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+               icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+               dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+       }
+
        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
 
@@ -7068,6 +7081,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                tx_ring->count = vsi->num_desc;
                tx_ring->size = 0;
                tx_ring->dcb_tc = 0;
+               if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+                       tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+               if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+                       tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
                vsi->tx_rings[i] = tx_ring;
 
                rx_ring = &tx_ring[1];
@@ -7466,62 +7483,141 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 }
 
 /**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+       struct i40e_aqc_get_set_rss_key_data rss_key;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       bool pf_lut = false;
+       u8 *rss_lut;
+       int ret, i;
+
+       memset(&rss_key, 0, sizeof(rss_key));
+       memcpy(&rss_key, seed, sizeof(rss_key));
+
+       rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
+       if (!rss_lut)
+               return -ENOMEM;
+
+       /* Populate the LUT with the available queues in round-robin fashion */
+       for (i = 0; i < vsi->rss_table_size; i++)
+               rss_lut[i] = i % vsi->rss_size;
+
+       ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Cannot set RSS key, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               kfree(rss_lut);
+               return ret;
+       }
+
+       if (vsi->type == I40E_VSI_MAIN)
+               pf_lut = true;
+
+       ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+                                 vsi->rss_table_size);
+       if (ret)
+               dev_info(&pf->pdev->dev,
+                        "Cannot set RSS lut, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+       kfree(rss_lut);
+       return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+       u8 seed[I40E_HKEY_ARRAY_SIZE];
+       struct i40e_pf *pf = vsi->back;
+
+       netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+       vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+               return i40e_config_rss_aq(vsi, seed);
+
+       return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
  * @pf: board private structure
+ * @seed: RSS hash seed
  **/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
 {
-       u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_hw *hw = &pf->hw;
+       u32 *seed_dw = (u32 *)seed;
+       u32 current_queue = 0;
        u32 lut = 0;
        int i, j;
-       u64 hena;
-       u32 reg_val;
 
-       netdev_rss_key_fill(rss_key, sizeof(rss_key));
+       /* Fill out hash function seed */
        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-               wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+               wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+       for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               lut = 0;
+               for (j = 0; j < 4; j++) {
+                       if (current_queue == vsi->rss_size)
+                               current_queue = 0;
+                       lut |= ((current_queue) << (8 * j));
+                       current_queue++;
+               }
+               wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+       }
+       i40e_flush(hw);
+
+       return 0;
+}
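
The inner loop above packs four 8-bit LUT entries into each 32-bit HLUT register, least-significant byte first, wrapping current_queue once it reaches rss_size. A worked standalone rendition for rss_size = 4, showing only the first two registers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lut, current_queue = 0, rss_size = 4;
        int i, j;

        for (i = 0; i < 2; i++) {       /* first two HLUT registers */
                lut = 0;
                for (j = 0; j < 4; j++) {
                        if (current_queue == rss_size)
                                current_queue = 0;
                        lut |= current_queue << (8 * j);
                        current_queue++;
                }
                printf("I40E_PFQF_HLUT(%d) = 0x%08x\n", i, (unsigned)lut);
        }
        /* Both iterations print 0x03020100: queues 0,1,2,3 in the byte lanes. */
        return 0;
}
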
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       u8 seed[I40E_HKEY_ARRAY_SIZE];
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       u64 hena;
+
+       netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
 
        /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
        hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
                ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
-       hena |= I40E_DEFAULT_RSS_HENA;
+       hena |= i40e_pf_get_default_rss_hena(pf);
+
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
        vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
 
-       /* Check capability and Set table size and register per hw expectation*/
+       /* Determine the RSS table size based on the hardware capabilities */
        reg_val = rd32(hw, I40E_PFQF_CTL_0);
-       if (pf->rss_table_size == 512)
-               reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-       else
-               reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+       reg_val = (pf->rss_table_size == 512) ?
+                       (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+                       (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
        wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
-       /* Populate the LUT with max no. of queues in round robin fashion */
-       for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
-               /* The assumption is that lan qp count will be the highest
-                * qp count for any PF VSI that needs RSS.
-                * If multiple VSIs need RSS support, all the qp counts
-                * for those VSIs should be a power of 2 for RSS to work.
-                * If LAN VSI is the only consumer for RSS then this requirement
-                * is not necessary.
-                */
-               if (j == vsi->rss_size)
-                       j = 0;
-               /* lut = 4-byte sliding window of 4 lut entries */
-               lut = (lut << 8) | (j &
-                        (BIT(pf->hw.func_caps.rss_table_entry_width) - 1));
-               /* On i = 3, we have 4 entries in lut; write to the register */
-               if ((i & 3) == 3)
-                       wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
-       }
-       i40e_flush(hw);
-
-       return 0;
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+               return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+       else
+               return i40e_config_rss_reg(pf, seed);
 }
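
Both paths consume the same seed: i40e_config_rss_reg() writes it out as I40E_PFQF_HKEY_MAX_INDEX + 1 32-bit HKEY registers, while i40e_config_rss_aq() copies it into the 0x34-byte AQ key structure. A small standalone check of that size correspondence, assuming the driver's I40E_PFQF_HKEY_MAX_INDEX value of 12 (so I40E_HKEY_ARRAY_SIZE works out to (12 + 1) * 4 = 52 = 0x34 bytes):

#include <stdio.h>

#define I40E_PFQF_HKEY_MAX_INDEX 12     /* assumed, per i40e_register.h */
#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)

struct i40e_aqc_get_set_rss_key_data {
        unsigned char standard_rss_key[0x28];
        unsigned char extended_hash_key[0xc];
};

int main(void)
{
        /* 52-byte seed == 40-byte standard key + 12-byte extended key. */
        _Static_assert(I40E_HKEY_ARRAY_SIZE ==
                       sizeof(struct i40e_aqc_get_set_rss_key_data),
                       "seed must fill the AQ key structure exactly");
        printf("seed/key size: %d bytes\n", I40E_HKEY_ARRAY_SIZE);
        return 0;
}
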
 
 /**
@@ -7762,9 +7858,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
        }
 
        if (pf->hw.func_caps.vmdq) {
-               pf->flags |= I40E_FLAG_VMDQ_ENABLED;
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
-               pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+               pf->flags |= I40E_FLAG_VMDQ_ENABLED;
        }
 
 #ifdef I40E_FCOE
@@ -7782,6 +7877,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
                                        I40E_MAX_VF_COUNT);
        }
 #endif /* CONFIG_PCI_IOV */
+       if (pf->hw.mac.type == I40E_MAC_X722) {
+               pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+                            I40E_FLAG_128_QP_RSS_CAPABLE |
+                            I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+                            I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+                            I40E_FLAG_WB_ON_ITR_CAPABLE |
+                            I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+       }
        pf->eeprom_version = 0xDEAD;
        pf->lan_veb = I40E_NO_VEB;
        pf->lan_vsi = I40E_NO_VSI;
@@ -8937,6 +9040,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                break;
        }
 
+       if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+           (vsi->type == I40E_VSI_VMDQ2)) {
+               ret = i40e_vsi_config_rss(vsi);
+       }
        return vsi;
 
 err_rings:
index ce986af213d2847d9e2b41f86aa9812aad0054a3..9b83abc0e774cbfa255889d75b962e43c8299c1b 100644 (file)
@@ -211,6 +211,74 @@ read_nvm_exit:
        return ret_code;
 }
 
+/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+                                   u32 offset, u16 words, void *data,
+                                   bool last_command)
+{
+       i40e_status ret_code = I40E_ERR_NVM;
+       struct i40e_asq_cmd_details cmd_details;
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+
+       /* Here we are checking the SR limit only for the flat memory model.
+        * We cannot do it for the module-based model, as we did not acquire
+        * the NVM resource yet (we cannot get the module pointer value).
+        * Firmware will check the module-based model.
+        */
+       if ((offset + words) > hw->nvm.sr_size)
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+                          (offset + words), hw->nvm.sr_size);
+       else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+               /* We can read only up to 4KB (one sector) in one AQ command */
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read fail error: tried to read %d words, limit is %d.\n",
+                          words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+       else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+                != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+               /* A single read cannot spread over two sectors */
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+                          offset, words);
+       else
+               ret_code = i40e_aq_read_nvm(hw, module_pointer,
+                                           2 * offset,  /*bytes*/
+                                           2 * words,   /*bytes*/
+                                           data, last_command, &cmd_details);
+
+       return ret_code;
+}
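
The three checks above reject reads past the end of the Shadow RAM, reads longer than one 4KB sector, and reads that straddle a sector boundary. A standalone rendition of the boundary check with two sample reads, assuming the driver's I40E_SR_SECTOR_SIZE_IN_WORDS of 2048 (4KB of 16-bit words):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_WORDS 2048       /* assumed I40E_SR_SECTOR_SIZE_IN_WORDS */

/* Returns 1 when a read of 'words' at 'offset' stays within one sector. */
static int read_fits_one_sector(uint32_t offset, uint16_t words)
{
        return (offset + (words - 1)) / SECTOR_WORDS ==
               offset / SECTOR_WORDS;
}

int main(void)
{
        printf("%d\n", read_fits_one_sector(2040, 8));  /* 1: ends at word 2047 */
        printf("%d\n", read_fits_one_sector(2040, 9));  /* 0: crosses into word 2048 */
        return 0;
}
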
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+                                        u16 *data)
+{
+       i40e_status ret_code;
+
+       ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+       *data = le16_to_cpu(*(__le16 *)data);
+
+       return ret_code;
+}
+
 /**
  * i40e_read_nvm_word - Reads Shadow RAM
  * @hw: pointer to the HW structure
@@ -222,6 +290,8 @@ read_nvm_exit:
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                               u16 *data)
 {
+       if (hw->mac.type == I40E_MAC_X722)
+               return i40e_read_nvm_word_aq(hw, offset, data);
        return i40e_read_nvm_word_srctl(hw, offset, data);
 }
 
@@ -256,6 +326,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
        return ret_code;
 }
 
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the Shadow RAM using the
+ * i40e_read_nvm_aq() method. The buffer read is preceded by taking
+ * NVM ownership and followed by its release.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+                                          u16 *words, u16 *data)
+{
+       i40e_status ret_code;
+       u16 read_size = *words;
+       bool last_cmd = false;
+       u16 words_read = 0;
+       u16 i = 0;
+
+       do {
+               /* Calculate the number of words to read in this step.
+                * The FVL AQ does not allow reading more than one sector
+                * at a time or crossing sector boundaries.
+                */
+               if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+                       read_size = min(*words,
+                                       (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+                                     (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+               else
+                       read_size = min((*words - words_read),
+                                       I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+               /* Check if this is last command, if so set proper flag */
+               if ((words_read + read_size) >= *words)
+                       last_cmd = true;
+
+               ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+                                           data + words_read, last_cmd);
+               if (ret_code)
+                       goto read_nvm_buffer_aq_exit;
+
+               /* Increment counter for words already read and move offset to
+                * new read location
+                */
+               words_read += read_size;
+               offset += read_size;
+       } while (words_read < *words);
+
+       for (i = 0; i < *words; i++)
+               data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+       *words = words_read;
+       return ret_code;
+}
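
The loop reads up to the next sector boundary first when offset is unaligned, then continues in full-sector chunks, flagging the final AQ command via last_cmd. Simulating the chunk sizes for a 20-word read starting at word 2040 (same assumed 2048-word sector as above):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_WORDS 2048       /* assumed I40E_SR_SECTOR_SIZE_IN_WORDS */

static uint16_t min_u16(uint16_t a, uint16_t b) { return a < b ? a : b; }

int main(void)
{
        uint32_t offset = 2040;
        uint16_t words = 20, words_read = 0, read_size;

        while (words_read < words) {
                if (offset % SECTOR_WORDS)      /* align to the sector first */
                        read_size = min_u16(words, SECTOR_WORDS -
                                            offset % SECTOR_WORDS);
                else
                        read_size = min_u16(words - words_read, SECTOR_WORDS);

                printf("AQ read: offset=%u words=%u last=%d\n",
                       (unsigned)offset, (unsigned)read_size,
                       words_read + read_size >= words);
                words_read += read_size;
                offset += read_size;
        }
        /* Prints two chunks: 8 words up to the boundary, then 12 words. */
        return 0;
}
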
+
 /**
  * i40e_read_nvm_buffer - Reads Shadow RAM buffer
  * @hw: pointer to the HW structure
@@ -270,6 +397,8 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                 u16 *words, u16 *data)
 {
+       if (hw->mac.type == I40E_MAC_X722)
+               return i40e_read_nvm_buffer_aq(hw, offset, words, data);
        return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 }
 
index d52a9f7873b0c927a78accc7a25b23ab3867f153..dcb72a8ee8e590dfa390b43691cdbb3b39b9933d 100644 (file)
@@ -61,6 +61,17 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
 char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
 char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+                               bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+                               bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+                               u16 seid,
+                               struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+                               u16 seid,
+                               struct i40e_aqc_get_set_rss_key_data *key);
+
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 
index 522d6df513300fffec88a2c2494a66c0d8e03db8..acae6c744bc2ee149f3bf3f325c3877569b3dc48 100644 (file)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
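A minimal sketch (illustrative only, not part of this patch) of the single-bit read pattern these SHIFT/MASK pairs support, using the just-defined GLNVM_FLA LOCKED field; the function name is hypothetical.

	static bool i40e_nvm_flash_locked(struct i40e_hw *hw)
	{
		u32 fla = rd32(hw, I40E_GLNVM_FLA);

		/* mask selects bit 6, the shift normalises it to 0/1 */
		return (fla & I40E_GLNVM_FLA_LOCKED_MASK) >>
		       I40E_GLNVM_FLA_LOCKED_SHIFT;
	}
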
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
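
Writable fields follow the usual read-modify-write pattern; a hedged sketch against the NPQ_CFG EXTEND_TO bit just defined (hypothetical function name; wr32() is the driver's MMIO write helper):

	static void i40e_npq_set_extend_to(struct i40e_hw *hw, bool enable)
	{
		u32 cfg = rd32(hw, I40E_GLPCI_NPQ_CFG);

		/* clear the field, then set it back only if requested */
		cfg &= ~I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK;
		if (enable)
			cfg |= I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK;
		wr32(hw, I40E_GLPCI_NPQ_CFG, cfg);
	}
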
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
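
Multi-field registers such as WQEALLOC are composed by shifting each value into place and masking it to its defined width before the write; a sketch under that assumption (hypothetical function name, illustrative only):

	static void i40e_pe_wqe_alloc(struct i40e_hw *hw, u32 qpid, u32 desc_idx)
	{
		/* qpid occupies bits 0-17, the descriptor index bits 20-31 */
		u32 db = ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
			  I40E_PFPE_WQEALLOC_PEQPID_MASK) |
			 ((desc_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
			  I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK);

		wr32(hw, I40E_PFPE_WQEALLOC, db);
	}
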
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
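
The wide PE statistics here are split across LO/HI register pairs (a full 32-bit low word plus a 16-bit high word at the next offset). A sketch of reassembling one such counter, using the pair just defined; the function name is hypothetical and the snippet is illustrative, not part of this patch.

	static u64 i40e_read_pf_ip4_rx_octets(struct i40e_hw *hw, int pf)
	{
		u64 lo = rd32(hw, I40E_GLPES_PFIP4RXOCTSLO(pf));
		/* HI holds only 16 valid bits; mask is already at shift 0 */
		u64 hi = rd32(hw, I40E_GLPES_PFIP4RXOCTSHI(pf)) &
			 I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK;

		return (hi << 32) | lo;
	}
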
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
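/* A hedged sketch, not from the commit: the per-PF statistics above use a
 * split-counter layout where each *LO register holds the low 32 bits and
 * the matching *HI register the upper bits (0xFFFF masks, i.e. 48-bit
 * counters). Assumes the driver's rd32() MMIO read helper and a
 * struct i40e_hw pointer are in scope; the function name is illustrative,
 * not the driver's own.
 */
static u64 i40e_read_pes_stat48(struct i40e_hw *hw, u32 lo_reg, u32 hi_reg)
{
	u64 lo, hi;

	/* read LO before HI, then splice into one 48-bit value */
	lo = rd32(hw, lo_reg);
	hi = (u64)(rd32(hw, hi_reg) & 0xFFFF) << 32;
	return hi | lo;
}
/* hypothetical call site, IPv4 RX octets for PF index 0:
 *	octets = i40e_read_pes_stat48(hw, I40E_GLPES_PFIP4RXOCTSLO(0),
 *				      I40E_GLPES_PFIP4RXOCTSHI(0));
 */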
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
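/* A hedged decode sketch: as throughout this header, a field is read by
 * masking with *_MASK and shifting right by *_SHIFT. Assumes rd32() and
 * hw are in scope; reg/pehsize/pedsize are illustrative locals.
 */
	u32 reg = rd32(hw, I40E_PFQF_CTL_2);
	u32 pehsize = (reg & I40E_PFQF_CTL_2_PEHSIZE_MASK) >>
		      I40E_PFQF_CTL_2_PEHSIZE_SHIFT;
	u32 pedsize = (reg & I40E_PFQF_CTL_2_PEDSIZE_MASK) >>
		      I40E_PFQF_CTL_2_PEDSIZE_SHIFT;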
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
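/* Each X722 HLUT register packs four 7-bit LUT entries at bits 0, 8, 16
 * and 24. A sketch of loading the LUT from a byte array -- assumptions:
 * wr32() is the driver's MMIO write helper, hw is in scope, and lut[]
 * holds 4 * (I40E_X722_PFQF_HLUT_MAX_INDEX + 1) entries, each below 0x80.
 */
	int i;

	for (i = 0; i <= I40E_X722_PFQF_HLUT_MAX_INDEX; i++)
		wr32(hw, I40E_X722_PFQF_HLUT(i),
		     ((u32)lut[4 * i] << I40E_X722_PFQF_HLUT_LUT0_SHIFT) |
		     ((u32)lut[4 * i + 1] << I40E_X722_PFQF_HLUT_LUT1_SHIFT) |
		     ((u32)lut[4 * i + 2] << I40E_X722_PFQF_HLUT_LUT2_SHIFT) |
		     ((u32)lut[4 * i + 3] << I40E_X722_PFQF_HLUT_LUT3_SHIFT));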
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
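/* VSIQF_HKEY and VSIQF_HLUT are doubly indexed: the first argument walks
 * 32-bit words in 2048-byte strides, the second selects the VSI in 4-byte
 * strides. A sketch of writing a per-VSI RSS hash key -- wr32(), hw, a
 * vsi_id and a key[] of I40E_VSIQF_HKEY_MAX_INDEX + 1 words are assumed
 * to be in scope.
 */
	int i;

	for (i = 0; i <= I40E_VSIQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_VSIQF_HKEY(i, vsi_id), key[i]);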
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
index 330e4ef43cd8fafc9a5a8985b0b713e9aec93e49..738aca68f665f8cad4382e0b57b6c744c24dab79 100644 (file)
@@ -853,15 +853,40 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-       u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
-                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-                 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-                 /* allow 00 to be written to the index */
-
-       wr32(&vsi->back->hw,
-            I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
-            val);
+       u16 flags = q_vector->tx.ring[0].flags;
+
+       if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+               u32 val;
+
+               if (q_vector->arm_wb_state)
+                       return;
+
+               val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+               wr32(&vsi->back->hw,
+                    I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+                                        vsi->base_vector - 1),
+                    val);
+               q_vector->arm_wb_state = true;
+       } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+               u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                         I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+                         I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                         I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+                         /* allow 00 to be written to the index */
+
+               wr32(&vsi->back->hw,
+                    I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+                                        vsi->base_vector - 1), val);
+       } else {
+               u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+                         I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+                         I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+                         I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+                       /* allow 00 to be written to the index */
+
+               wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+       }
 }
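
The rewritten i40e_force_wb() takes one of three paths: on WB_ON_ITR-capable rings it writes only the WB_ON_ITR bit and latches q_vector->arm_wb_state so the doorbell is not rewritten until the next NAPI poll completes; with MSI-X it raises a software interrupt through DYN_CTLN; otherwise it falls back to the legacy DYN_CTL0 register. A minimal userspace sketch of the latch, with wr32() replaced by a print stub and the register offset purely illustrative (the bit position mirrors the *_WB_ON_ITR_MASK definitions in this diff):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* bit 30, mirroring the *_WB_ON_ITR_MASK definitions in this diff */
#define WB_ON_ITR_MASK (1u << 30)

struct q_vector_model {
    bool arm_wb_state;          /* true once the WB doorbell is armed */
};

/* stand-in for wr32(): just record the MMIO write */
static void wr32_stub(uint32_t reg, uint32_t val)
{
    printf("wr32(0x%05x, 0x%08x)\n", reg, val);
}

/* arm write-back at most once per poll cycle */
static void force_wb_model(struct q_vector_model *q, uint32_t reg)
{
    if (q->arm_wb_state)
        return;                 /* already armed, skip the write */
    wr32_stub(reg, WB_ON_ITR_MASK);
    q->arm_wb_state = true;
}

int main(void)
{
    struct q_vector_model q = { .arm_wb_state = false };
    uint32_t reg = 0x13000;     /* illustrative register offset */

    force_wb_model(&q, reg);    /* first call writes the doorbell */
    force_wb_model(&q, reg);    /* second call is a no-op */
    q.arm_wb_state = false;     /* NAPI completion clears the latch */
    force_wb_model(&q, reg);    /* armed again on the next cycle */
    return 0;
}

The latch is what makes WB_ON_ITR cheap: repeated force-writeback requests within one poll cycle collapse into a single MMIO write.
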
 
 /**
@@ -1404,7 +1429,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* be set if the *inner* header is UDP
         */
-       if (ipv4_tunnel) {
+       if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+           (ipv4_tunnel)) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -1918,6 +1944,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                return budget;
        }
 
+       if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
+               q_vector->arm_wb_state = false;
+
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2011,6 +2040,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Due to lack of space, no more new filters can be programmed */
        if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+               /* HW ATR eviction will take care of removing filters on FIN
+                * and RST packets.
+                */
+               if (th->fin || th->rst)
+                       return;
+       }
 
        tx_ring->atr_count++;
 
@@ -2066,6 +2102,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
+       if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+               dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
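
When the MAC supports hardware ATR eviction, the driver both skips software filter work on FIN/RST (the hardware removes those filters itself) and sets the ATR bit in the filter-programming descriptor. A small sketch of the decision, with a stand-in flag name and the tcphdr bitfields replaced by plain booleans:

#include <stdbool.h>
#include <stdio.h>

/* models I40E_FLAG_HW_ATR_EVICT_CAPABLE (stand-in bit value) */
#define FLAG_HW_ATR_EVICT_CAPABLE (1u << 0)

struct tcp_flags {
    bool syn, fin, rst;
};

/* returns true when the SW path should still program an ATR filter */
static bool atr_needs_sw_update(unsigned int pf_flags, struct tcp_flags th)
{
    if (pf_flags & FLAG_HW_ATR_EVICT_CAPABLE) {
        /* HW eviction removes filters on FIN/RST by itself */
        if (th.fin || th.rst)
            return false;
    }
    return true;
}

int main(void)
{
    struct tcp_flags fin = { .fin = true };

    printf("%d\n", atr_needs_sw_update(FLAG_HW_ATR_EVICT_CAPABLE, fin)); /* 0 */
    printf("%d\n", atr_needs_sw_update(0, fin));                         /* 1 */
    return 0;
}
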
@@ -2273,11 +2312,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+       struct udphdr *oudph;
+       struct iphdr *oiph;
        u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
+                       oudph = udp_hdr(skb);
+                       oiph = ip_hdr(skb);
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
@@ -2314,6 +2357,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                        *tx_flags &= ~I40E_TX_FLAGS_IPV4;
                        *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
+               if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+                   (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
+                   (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+                       oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+                                       oiph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, 0);
+                       *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+               }
        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
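
On rings with I40E_TXR_FLAGS_OUTER_UDP_CSUM, the outer UDP checksum field is seeded with the non-inverted pseudo-header sum (the effect of ~csum_tcpudp_magic()) and the L4T_CS context bit tells the hardware to finish the sum over the datagram. A self-contained model of the seed arithmetic, assuming host byte order and illustrative addresses; only the fold-and-seed logic is taken from the diff:

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit accumulator into a 16-bit ones'-complement sum */
static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/*
 * Seed for the outer UDP checksum: the ones'-complement sum of the
 * IPv4 pseudo header (src, dst, protocol, UDP length), NOT inverted.
 * The NIC adds the UDP header + payload and performs the final invert.
 * This mirrors ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, 0),
 * where len is skb->len - skb_transport_offset(skb).
 */
static uint16_t outer_udp_csum_seed(uint32_t saddr, uint32_t daddr,
                                    uint16_t udp_len, uint8_t proto)
{
    uint32_t sum = 0;

    sum += saddr >> 16;
    sum += saddr & 0xffff;
    sum += daddr >> 16;
    sum += daddr & 0xffff;
    sum += proto;           /* zero-padded protocol byte */
    sum += udp_len;
    return csum_fold(sum);
}

int main(void)
{
    /* 192.0.2.1 -> 192.0.2.2, 1500-byte UDP datagram, proto 17 */
    uint16_t seed = outer_udp_csum_seed(0xc0000201, 0xc0000202, 1500, 17);

    printf("seed = 0x%04x\n", seed);
    return 0;
}
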
index 429833c47245faa6cd3ad7d1e757b9668c169b05..f1385a1989fa822fba79074348549ba631f2509e 100644 (file)
@@ -78,6 +78,18 @@ enum i40e_dyn_idx_t {
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+       (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+         I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
 #define I40E_RXBUFFER_2048  2048
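
i40e_pf_get_default_rss_hena() just picks between two precomputed bitmaps, so the X722-only packet-classifier types cost nothing on older parts. A sketch of the selection, with PCTYPE values copied from the enum later in this diff and a stand-in flag bit; BIT_ULL keeps classifier numbers above 31 well-defined even on 32-bit builds:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum {
    PCTYPE_NONF_UNICAST_IPV4_UDP    = 29,
    PCTYPE_NONF_MULTICAST_IPV4_UDP  = 30,
    PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
    PCTYPE_NONF_UNICAST_IPV6_UDP    = 39,
    PCTYPE_NONF_MULTICAST_IPV6_UDP  = 40,
    PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
};

/* stand-in for I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE */
#define FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(0)

static uint64_t default_rss_hena(uint64_t pf_flags, uint64_t base_hena)
{
    uint64_t expanded = base_hena |
        BIT_ULL(PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) |
        BIT_ULL(PCTYPE_NONF_UNICAST_IPV4_UDP) |
        BIT_ULL(PCTYPE_NONF_MULTICAST_IPV4_UDP) |
        BIT_ULL(PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) |
        BIT_ULL(PCTYPE_NONF_UNICAST_IPV6_UDP) |
        BIT_ULL(PCTYPE_NONF_MULTICAST_IPV6_UDP);

    return (pf_flags & FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ?
        expanded : base_hena;
}

int main(void)
{
    /* the plain TCP/UDP pctypes from the default HENA */
    uint64_t base = BIT_ULL(31) | BIT_ULL(33) | BIT_ULL(41) | BIT_ULL(43);

    printf("default:  0x%016llx\n",
           (unsigned long long)default_rss_hena(0, base));
    printf("expanded: 0x%016llx\n",
           (unsigned long long)default_rss_hena(FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE,
                                                base));
    return 0;
}
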
@@ -253,6 +265,10 @@ struct i40e_ring {
        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
 
+       u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR       BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM  BIT(1)
+
        /* stats structs */
        struct i40e_queue_stats stats;
        struct u64_stats_sync syncp;
index a20128b82b62511ca55ec984d6587b3050761c82..61b6b114b4bc280f272672e981fe73d588128721 100644 (file)
 #define I40E_DEV_ID_20G_KR2            0x1587
 #define I40E_DEV_ID_VF                 0x154C
 #define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_SFP_X722           0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
+#define I40E_DEV_ID_X722_VF            0x37CD
+#define I40E_DEV_ID_X722_VF_HV         0x37D9
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
                                         (d) == I40E_DEV_ID_QSFP_B  || \
@@ -120,6 +125,8 @@ enum i40e_mac_type {
        I40E_MAC_X710,
        I40E_MAC_XL710,
        I40E_MAC_VF,
+       I40E_MAC_X722,
+       I40E_MAC_X722_VF,
        I40E_MAC_GENERIC,
 };
 
@@ -502,7 +509,8 @@ struct i40e_hw {
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
 {
-       return hw->mac.type == I40E_MAC_VF;
+       return (hw->mac.type == I40E_MAC_VF ||
+               hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
@@ -599,14 +607,18 @@ enum i40e_rx_desc_status_bits {
        I40E_RX_DESC_STATUS_CRCP_SHIFT          = 4,
        I40E_RX_DESC_STATUS_TSYNINDX_SHIFT      = 5, /* 2 BITS */
        I40E_RX_DESC_STATUS_TSYNVALID_SHIFT     = 7,
-       I40E_RX_DESC_STATUS_PIF_SHIFT           = 8,
+       /* Note: Bit 8 is reserved in X710 and XL710 */
+       I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT     = 8,
        I40E_RX_DESC_STATUS_UMBCAST_SHIFT       = 9, /* 2 BITS */
        I40E_RX_DESC_STATUS_FLM_SHIFT           = 11,
        I40E_RX_DESC_STATUS_FLTSTAT_SHIFT       = 12, /* 2 BITS */
        I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
        I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
        I40E_RX_DESC_STATUS_RESERVED_SHIFT      = 16, /* 2 BITS */
-       I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18,
+       /* Note: For non-tunnel packets, INT_UDP_0 is the right status bit
+        * for the UDP header.
+        */
+       I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT     = 18,
        I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
@@ -947,6 +959,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_DECTTL_MASK   (0xFULL << \
                                         I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT  23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK   BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_filter_program_desc {
        __le32 qindex_flex_ptype_vsi;
        __le32 rsvd;
@@ -965,15 +979,24 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-       /* Note: Values 0-30 are reserved for future use */
+       /* Note: Values 0-28 are reserved for future use.
+        * Values 29, 30, and 32 are not supported on XL710 and X710.
+        */
+       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP        = 29,
+       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP      = 30,
        I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
-       /* Note: Value 32 is reserved for future use */
+       I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK     = 32,
        I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
        I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
        I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
-       /* Note: Values 37-40 are reserved for future use */
+       /* Note: Values 37-38 are reserved for future use.
+        * Values 39, 40, and 42 are not supported on XL710 and X710.
+        */
+       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP        = 39,
+       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP      = 40,
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
+       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK     = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
@@ -1026,6 +1049,10 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
                                          I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
 
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT    (0xEULL + \
+                                        I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK     BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK        (0x1FFUL << \
                                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
index d29d4062addf51141dbeefc152d1613b8171c981..8a7607c6e142d05b247ff7b948e9c6599c22a91c 100644 (file)
@@ -1177,9 +1177,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
-               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
-                                          I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
-
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+                       vfres->vf_offload_flags |=
+                               I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+       } else {
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+       }
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
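
The VF resource reply now advertises I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ only when the PF hardware is AQ-RSS capable and the VF itself requested that capability; otherwise it falls back to register-based RSS. A compact model of the negotiation, with stand-in flag values:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for I40E_VIRTCHNL_VF_OFFLOAD_* and I40E_FLAG_RSS_AQ_CAPABLE */
#define OFFLOAD_VLAN      (1u << 0)
#define OFFLOAD_RSS_AQ    (1u << 1)
#define OFFLOAD_RSS_REG   (1u << 2)
#define PF_RSS_AQ_CAPABLE (1u << 0)

static uint32_t negotiate_offloads(uint32_t pf_flags, uint32_t vf_caps,
                                   int has_pvid)
{
    uint32_t out = 0;

    if (!has_pvid)
        out |= OFFLOAD_VLAN;
    if (pf_flags & PF_RSS_AQ_CAPABLE) {
        if (vf_caps & OFFLOAD_RSS_AQ)
            out |= OFFLOAD_RSS_AQ;  /* both sides support AQ RSS */
    } else {
        out |= OFFLOAD_RSS_REG;     /* legacy register-based RSS */
    }
    return out;
}

int main(void)
{
    printf("0x%x\n", negotiate_offloads(PF_RSS_AQ_CAPABLE, OFFLOAD_RSS_AQ, 0));
    printf("0x%x\n", negotiate_offloads(0, OFFLOAD_RSS_AQ, 0));
    return 0;
}
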
index d5bd6f06692137819e557fc719df2f981253daa3..c8022092d36986acce3062af4ec8b2d643314495 100644 (file)
@@ -35,7 +35,6 @@
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
 #define I40E_FW_API_VERSION_MINOR      0x0004
-#define I40E_FW_API_VERSION_A0_MINOR  0x0000
 
 struct i40e_aq_desc {
        __le16 flags;
@@ -255,6 +254,10 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
+       i40e_aqc_opc_set_rss_key        = 0x0B02,
+       i40e_aqc_opc_set_rss_lut        = 0x0B03,
+       i40e_aqc_opc_get_rss_key        = 0x0B04,
+       i40e_aqc_opc_get_rss_lut        = 0x0B05,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -819,8 +822,12 @@ struct i40e_aqc_vsi_properties_data {
                                         I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
        /* queueing option section */
        u8      queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA  0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA    0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA    0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA   0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI        0x40
        u8      queueing_opt_reserved[3];
        /* scheduler section */
        u8      up_enable_bits;
@@ -2089,6 +2096,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT      0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK       (0x3FF << \
+                                       I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+       __le16  vsi_id;
+       u8      reserved[6];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+       u8 standard_rss_key[0x28];
+       u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT      0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK       (0x3FF << \
+                                       I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+       __le16  vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT  0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK   (0x1 << \
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI    0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF     1
+       __le16  flags;
+       u8      reserved[4];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure_A0 {
index 56c7e751149b0cba324c2722178705c42e4c908f..023d32d090ce49094deb5e851536e62b545892ed 100644 (file)
@@ -54,6 +54,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_20G_KR2:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
+               case I40E_DEV_ID_SFP_X722:
+               case I40E_DEV_ID_1G_BASE_T_X722:
+               case I40E_DEV_ID_10G_BASE_T_X722:
+                       hw->mac.type = I40E_MAC_X722;
+                       break;
+               case I40E_DEV_ID_X722_VF:
+               case I40E_DEV_ID_X722_VF_HV:
+                       hw->mac.type = I40E_MAC_X722_VF;
+                       break;
                case I40E_DEV_ID_VF:
                case I40E_DEV_ID_VF_HV:
                        hw->mac.type = I40E_MAC_VF;
@@ -383,6 +392,169 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+                                          u16 vsi_id, bool pf_lut,
+                                          u8 *lut, u16 lut_size,
+                                          bool set)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_set_rss_lut *cmd_resp =
+                  (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+       if (set)
+               i40evf_fill_default_direct_cmd_desc(&desc,
+                                                   i40e_aqc_opc_set_rss_lut);
+       else
+               i40evf_fill_default_direct_cmd_desc(&desc,
+                                                   i40e_aqc_opc_get_rss_lut);
+
+       /* Indirect command */
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+       cmd_resp->vsi_id =
+                       cpu_to_le16((u16)((vsi_id <<
+                                         I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+                                         I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+       cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+       if (pf_lut)
+               cmd_resp->flags |= cpu_to_le16((u16)
+                                       ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+       else
+               cmd_resp->flags |= cpu_to_le16((u16)
+                                       ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+       cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+       cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+       status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+       return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+                                 bool pf_lut, u8 *lut, u16 lut_size)
+{
+       return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+                                      false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+                                 bool pf_lut, u8 *lut, u16 lut_size)
+{
+       return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+                                     u16 vsi_id,
+                                     struct i40e_aqc_get_set_rss_key_data *key,
+                                     bool set)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_set_rss_key *cmd_resp =
+                       (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+       u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+       if (set)
+               i40evf_fill_default_direct_cmd_desc(&desc,
+                                                   i40e_aqc_opc_set_rss_key);
+       else
+               i40evf_fill_default_direct_cmd_desc(&desc,
+                                                   i40e_aqc_opc_get_rss_key);
+
+       /* Indirect command */
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+       cmd_resp->vsi_id =
+                       cpu_to_le16((u16)((vsi_id <<
+                                         I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+                                         I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+       cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+       cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
+       cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+       status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+       return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+                                 u16 vsi_id,
+                                 struct i40e_aqc_get_set_rss_key_data *key)
+{
+       return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+                                 u16 vsi_id,
+                                 struct i40e_aqc_get_set_rss_key_data *key)
+{
+       return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
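
Both AQ helpers pack a 10-bit VSI id, a valid bit, and a PF/VSI table-type flag into the descriptor before handing the buffer to i40evf_asq_send_command(). A host-endian model of that packing (field layout and constants taken from the struct definitions above; the cpu_to_le16 conversion is elided here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RSS_LUT_VSI_VALID     (0x1u << 15)
#define RSS_LUT_VSI_ID_SHIFT  0
#define RSS_LUT_VSI_ID_MASK   (0x3FFu << RSS_LUT_VSI_ID_SHIFT)
#define RSS_LUT_TYPE_SHIFT    0
#define RSS_LUT_TYPE_MASK     (0x1u << RSS_LUT_TYPE_SHIFT)
#define RSS_LUT_TYPE_VSI      0
#define RSS_LUT_TYPE_PF       1

struct rss_lut_desc {
    uint16_t vsi_id;
    uint16_t flags;
};

static struct rss_lut_desc pack_rss_lut_desc(uint16_t vsi_id, bool pf_lut)
{
    struct rss_lut_desc d = { 0 };

    d.vsi_id = ((vsi_id << RSS_LUT_VSI_ID_SHIFT) & RSS_LUT_VSI_ID_MASK) |
               RSS_LUT_VSI_VALID;
    d.flags = ((pf_lut ? RSS_LUT_TYPE_PF : RSS_LUT_TYPE_VSI)
               << RSS_LUT_TYPE_SHIFT) & RSS_LUT_TYPE_MASK;
    return d;
}

int main(void)
{
    struct rss_lut_desc d = pack_rss_lut_desc(5, false);

    printf("vsi_id=0x%04x flags=0x%04x\n", d.vsi_id, d.flags); /* 0x8005 0x0000 */
    return 0;
}
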
 
 /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
index 856eb9d06595eb7eff8ef66df8df0eb137751808..55ae4b0f8192fea531f0ea0e29013b111b540a4b 100644 (file)
@@ -63,6 +63,17 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
 char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
 char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+                                 bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+                                 bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+                                 u16 seid,
+                                 struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+                                 u16 seid,
+                                 struct i40e_aqc_get_set_rss_key_data *key);
+
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
 extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
index 3cc737629bf74030c6b4b824eee5905706d37eac..2e2ccc1719b680682851b9dd8d29669cef0563cb 100644 (file)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
index 60f88e4ad065ebdfe609e5e94cdce94e5a410d3c..7309479a07642fc158fc810ab5dc35455eb848f4 100644 (file)
@@ -366,15 +366,32 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-       u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
-                 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
-                 I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
-                 I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-                 /* allow 00 to be written to the index */
-
-       wr32(&vsi->back->hw,
-            I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
-            val);
+       u16 flags = q_vector->tx.ring[0].flags;
+
+       if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+               u32 val;
+
+               if (q_vector->arm_wb_state)
+                       return;
+
+               val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+               wr32(&vsi->back->hw,
+                    I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+                                         vsi->base_vector - 1),
+                    val);
+               q_vector->arm_wb_state = true;
+       } else {
+               u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+                         I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+                         I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+                         I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+                         /* allow 00 to be written to the index */
+
+               wr32(&vsi->back->hw,
+                    I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+                                         vsi->base_vector - 1), val);
+       }
 }
 
 /**
@@ -1372,6 +1389,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
                return budget;
        }
 
+       if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
+               q_vector->arm_wb_state = false;
+
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
        i40e_update_enable_itr(vsi, q_vector);
@@ -1508,11 +1528,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+       struct udphdr *oudph;
+       struct iphdr *oiph;
        u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
+                       oudph = udp_hdr(skb);
+                       oiph = ip_hdr(skb);
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
@@ -1551,6 +1575,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                }
 
 
+               if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+                   (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
+                   (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+                       oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+                                       oiph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, 0);
+                       *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+               }
        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
index 6b47c818d1f08c11b81fd5e21b7ba9b162bdadd9..9a30f5d8c0899c2f813319f347ff82b6e04faee1 100644 (file)
@@ -78,6 +78,18 @@ enum i40e_dyn_idx_t {
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+               BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+               BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+               BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+               BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+               BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+               BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+       (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+               I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
 #define I40E_RXBUFFER_2048  2048
@@ -250,6 +262,10 @@ struct i40e_ring {
        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
 
+       u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR       BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM  BIT(1)
+
        /* stats structs */
        struct i40e_queue_stats stats;
        struct u64_stats_sync syncp;
index 4ba9a012dcbac1bdbc87366df00f9d85c4e207f0..e32dc0b3616dc15b7b1d611fd5db642554b14be5 100644 (file)
 #define I40E_DEV_ID_20G_KR2            0x1587
 #define I40E_DEV_ID_VF                 0x154C
 #define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_SFP_X722           0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
+#define I40E_DEV_ID_X722_VF            0x37CD
+#define I40E_DEV_ID_X722_VF_HV         0x37D9
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
                                         (d) == I40E_DEV_ID_QSFP_B  || \
@@ -120,6 +125,8 @@ enum i40e_mac_type {
        I40E_MAC_X710,
        I40E_MAC_XL710,
        I40E_MAC_VF,
+       I40E_MAC_X722,
+       I40E_MAC_X722_VF,
        I40E_MAC_GENERIC,
 };
 
@@ -496,7 +503,8 @@ struct i40e_hw {
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
 {
-       return hw->mac.type == I40E_MAC_VF;
+       return (hw->mac.type == I40E_MAC_VF ||
+               hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
@@ -593,14 +601,18 @@ enum i40e_rx_desc_status_bits {
        I40E_RX_DESC_STATUS_CRCP_SHIFT          = 4,
        I40E_RX_DESC_STATUS_TSYNINDX_SHIFT      = 5, /* 2 BITS */
        I40E_RX_DESC_STATUS_TSYNVALID_SHIFT     = 7,
-       I40E_RX_DESC_STATUS_PIF_SHIFT           = 8,
+       /* Note: Bit 8 is reserved in X710 and XL710 */
+       I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT     = 8,
        I40E_RX_DESC_STATUS_UMBCAST_SHIFT       = 9, /* 2 BITS */
        I40E_RX_DESC_STATUS_FLM_SHIFT           = 11,
        I40E_RX_DESC_STATUS_FLTSTAT_SHIFT       = 12, /* 2 BITS */
        I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
        I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
        I40E_RX_DESC_STATUS_RESERVED_SHIFT      = 16, /* 2 BITS */
-       I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18,
+       /* Note: For non-tunnel packets, INT_UDP_0 is the right status bit
+        * for the UDP header.
+        */
+       I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT     = 18,
        I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
@@ -941,6 +953,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_DECTTL_MASK   (0xFULL << \
                                         I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT  23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK   BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_filter_program_desc {
        __le32 qindex_flex_ptype_vsi;
        __le32 rsvd;
@@ -959,15 +973,24 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-       /* Note: Values 0-30 are reserved for future use */
+       /* Note: Values 0-28 are reserved for future use.
+        * Values 29, 30, and 32 are not supported on XL710 and X710.
+        */
+       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP        = 29,
+       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP      = 30,
        I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
-       /* Note: Value 32 is reserved for future use */
+       I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK     = 32,
        I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
        I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
        I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
-       /* Note: Values 37-40 are reserved for future use */
+       /* Note: Values 37-38 are reserved for future use.
+        * Values 39, 40, and 42 are not supported on XL710 and X710.
+        */
+       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP        = 39,
+       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP      = 40,
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
+       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK     = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
index c33c7cce52fe2c5decf79e514a342275cb7f2f3a..3817cbbf45e6e753089b3d36e0c03daf7b5073d8 100644 (file)
@@ -101,6 +101,8 @@ struct i40e_vsi {
 #define MAX_RX_QUEUES 8
 #define MAX_TX_QUEUES MAX_RX_QUEUES
 
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
@@ -115,6 +117,7 @@ struct i40e_q_vector {
        u8 num_ringpairs;       /* total number of ring pairs in vector */
        int v_idx;        /* vector index in list */
        char name[IFNAMSIZ + 9];
+       bool arm_wb_state;
        cpumask_var_t affinity_mask;
 };
 
@@ -218,11 +221,15 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_PF_COMMS_FAILED              BIT(8)
 #define I40EVF_FLAG_RESET_PENDING                BIT(9)
 #define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
-/* duplcates for common code */
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE          BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE     BIT(12)
+/* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
 #define I40E_FLAG_IN_NETPOLL                    I40EVF_FLAG_IN_NETPOLL
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE            I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE       I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
        /* flags for admin queue service task */
        u32 aq_required;
 #define I40EVF_FLAG_AQ_ENABLE_QUEUES           BIT(0)
@@ -234,6 +241,7 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                BIT(6)
 #define I40EVF_FLAG_AQ_MAP_VECTORS             BIT(7)
 #define I40EVF_FLAG_AQ_HANDLE_RESET            BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS           BIT(9)
 #define I40EVF_FLAG_AQ_GET_CONFIG              BIT(10)
 
        /* OS defined structs */
index 1503cad918d88d42a559ecf4af15692751e3ba13..2a6063a3a14d8dc2474ec3f91ad9cb3aad43aa2d 100644 (file)
@@ -49,6 +49,7 @@ static const char i40evf_copyright[] =
  */
 static const struct pci_device_id i40evf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
        /* required last entry */
        {0, }
 };
@@ -1171,6 +1172,113 @@ out:
        return err;
 }
 
+/**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+       struct i40e_aqc_get_set_rss_key_data rss_key;
+       struct i40evf_adapter *adapter = vsi->back;
+       struct i40e_hw *hw = &adapter->hw;
+       int ret = 0, i;
+       u8 *rss_lut;
+
+       if (!vsi->id)
+               return;
+
+       if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+               /* bail because we already have a command pending */
+               dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+                       adapter->current_op);
+               return;
+       }
+
+       memset(&rss_key, 0, sizeof(rss_key));
+       memcpy(&rss_key, seed, sizeof(rss_key));
+
+       rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+       if (!rss_lut)
+               return;
+
+       /* Populate the LUT with the max no. of PF queues in round robin fashion */
+       for (i = 0; i < (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; i++)
+               rss_lut[i] = i % adapter->num_active_queues;
+
+       ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+       if (ret) {
+               dev_err(&adapter->pdev->dev,
+                       "Cannot set RSS key, err %s aq_err %s\n",
+                       i40evf_stat_str(hw, ret),
+                       i40evf_aq_str(hw, hw->aq.asq_last_status));
+               kfree(rss_lut);
+               return;
+       }
+
+       ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+                                   (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+       if (ret)
+               dev_err(&adapter->pdev->dev,
+                       "Cannot set RSS lut, err %s aq_err %s\n",
+                       i40evf_stat_str(hw, ret),
+                       i40evf_aq_str(hw, hw->aq.asq_last_status));
+       kfree(rss_lut);
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS if used
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+                                    const u8 *seed)
+{
+       struct i40e_hw *hw = &adapter->hw;
+       u32 *seed_dw = (u32 *)seed;
+       u32 cqueue = 0;
+       u32 lut = 0;
+       int i, j;
+
+       /* Fill out hash function seed */
+       for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+               wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+       /* Populate the LUT with the max no. of PF queues in round robin fashion */
+       for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+               lut = 0;
+               for (j = 0; j < 4; j++) {
+                       if (cqueue == adapter->num_active_queues)
+                               cqueue = 0;
+                       lut |= ((cqueue) << (8 * j));
+                       cqueue++;
+               }
+               wr32(hw, I40E_VFQF_HLUT(i), lut);
+       }
+       i40e_flush(hw);
+}
+
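
i40evf_configure_rss_reg() packs four 8-bit queue indices into each 32-bit HLUT register, cycling through the active queues round-robin. The sketch below reproduces that fill and prints the first two words; the register count matches I40E_VFQF_HLUT_MAX_INDEX + 1 and the queue count is illustrative:

#include <stdint.h>
#include <stdio.h>

#define HLUT_REGS 16    /* I40E_VFQF_HLUT_MAX_INDEX + 1 */

/* round-robin queue indices, 4 per 32-bit LUT register */
static void fill_rss_lut(uint32_t lut[HLUT_REGS], uint32_t num_queues)
{
    uint32_t cqueue = 0;
    int i, j;

    for (i = 0; i < HLUT_REGS; i++) {
        uint32_t val = 0;

        for (j = 0; j < 4; j++) {
            if (cqueue == num_queues)
                cqueue = 0;
            val |= cqueue << (8 * j);
            cqueue++;
        }
        lut[i] = val;
    }
}

int main(void)
{
    uint32_t lut[HLUT_REGS];

    fill_rss_lut(lut, 3);   /* 3 active queues */
    printf("lut[0] = 0x%08x\n", lut[0]);    /* 0x00020100 */
    printf("lut[1] = 0x%08x\n", lut[1]);    /* 0x01000201 */
    return 0;
}

Because cqueue carries across registers, the rotation stays even when the queue count does not divide four, which keeps the hash spread uniform.
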
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+       struct i40e_hw *hw = &adapter->hw;
+       u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+       u64 hena;
+
+       netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+       /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+       hena = I40E_DEFAULT_RSS_HENA;
+       wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+       wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+       if (RSS_AQ(adapter))
+               i40evf_configure_rss_aq(&adapter->vsi, seed);
+       else
+               i40evf_configure_rss_reg(adapter, seed);
+}
+
 /**
  * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
  * @adapter: board private structure to initialize
@@ -1416,6 +1524,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto watchdog_done;
        }
 
+       if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+               /* This message goes straight to the firmware, not the
+                * PF, so we don't have to set current_op as we will
+                * not get a response through the ARQ.
+                */
+               i40evf_configure_rss(adapter);
+               adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+               goto watchdog_done;
+       }
+
        if (adapter->state == __I40EVF_RUNNING)
                i40evf_request_stats(adapter);
 watchdog_done:
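
The watchdog drains one aq_required bit per pass; CONFIGURE_RSS is handled inline since the command goes straight to firmware and no ARQ reply is expected. A tiny model of that flag-draining pattern, with stand-in bit values:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for two of the I40EVF_FLAG_AQ_* bits */
#define AQ_ENABLE_QUEUES (1u << 0)
#define AQ_CONFIGURE_RSS (1u << 9)

static void watchdog_pass(uint32_t *aq_required)
{
    if (*aq_required & AQ_CONFIGURE_RSS) {
        printf("configure RSS via AQ\n");
        *aq_required &= ~AQ_CONFIGURE_RSS;  /* one task per pass */
        return;
    }
    if (*aq_required & AQ_ENABLE_QUEUES) {
        printf("enable queues\n");
        *aq_required &= ~AQ_ENABLE_QUEUES;
        return;
    }
    printf("idle\n");
}

int main(void)
{
    uint32_t aq_required = AQ_CONFIGURE_RSS | AQ_ENABLE_QUEUES;

    watchdog_pass(&aq_required);    /* configure RSS via AQ */
    watchdog_pass(&aq_required);    /* enable queues */
    watchdog_pass(&aq_required);    /* idle */
    return 0;
}

This is also why the init task only sets I40EVF_FLAG_AQ_CONFIGURE_RSS and kicks the watchdog timer on RSS_AQ-capable parts instead of configuring RSS inline.
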
@@ -1438,45 +1556,6 @@ restart_watchdog:
        schedule_work(&adapter->adminq_task);
 }
 
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
-       u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
-       struct i40e_hw *hw = &adapter->hw;
-       u32 cqueue = 0;
-       u32 lut = 0;
-       int i, j;
-       u64 hena;
-
-       /* Hash type is configured by the PF - we just supply the key */
-       netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
-       /* Fill out hash function seed */
-       for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-               wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
-       /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-       hena = I40E_DEFAULT_RSS_HENA;
-       wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-       wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
-       /* Populate the LUT with max no. of queues in round robin fashion */
-       for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-               lut = 0;
-               for (j = 0; j < 4; j++) {
-                       if (cqueue == adapter->num_active_queues)
-                               cqueue = 0;
-                       lut |= ((cqueue) << (8 * j));
-                       cqueue++;
-               }
-               wr32(hw, I40E_VFQF_HLUT(i), lut);
-       }
-       i40e_flush(hw);
-}
-
 #define I40EVF_RESET_WAIT_MS 10
 #define I40EVF_RESET_WAIT_COUNT 500
 /**
@@ -2186,7 +2265,8 @@ static void i40evf_init_task(struct work_struct *work)
        if (err)
                goto err_sw_init;
        i40evf_map_rings_to_vectors(adapter);
-       i40evf_configure_rss(adapter);
+       if (!RSS_AQ(adapter))
+               i40evf_configure_rss(adapter);
        err = i40evf_request_misc_irq(adapter);
        if (err)
                goto err_sw_init;
@@ -2211,6 +2291,13 @@ static void i40evf_init_task(struct work_struct *work)
        adapter->state = __I40EVF_DOWN;
        set_bit(__I40E_DOWN, &adapter->vsi.state);
        i40evf_misc_irq_enable(adapter);
+
+       if (RSS_AQ(adapter)) {
+               adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+               mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+       } else {
+               i40evf_configure_rss(adapter);
+       }
        return;
 restart:
        schedule_delayed_work(&adapter->init_task,
index d19256994e5cfefce6793dbe58b953d9c79a0504..7a73510e547cd49f38629b4d60fbab8b8dce945a 100644 (file)
@@ -231,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
        /* Verify phy id and set remaining function pointers */
        switch (phy->id) {
        case M88E1543_E_PHY_ID:
+       case M88E1512_E_PHY_ID:
        case I347AT4_E_PHY_ID:
        case M88E1112_E_PHY_ID:
        case M88E1111_I_PHY_ID:
@@ -243,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
                else
                        phy->ops.get_cable_length = igb_get_cable_length_m88;
                phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
-               /* Check if this PHY is confgured for media swap. */
+               /* Check if this PHY is configured for media swap. */
                if (phy->id == M88E1112_E_PHY_ID) {
                        u16 data;
 
@@ -266,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
                                hw->mac.ops.check_for_link =
                                                igb_check_for_link_media_swap;
                }
+               if (phy->id == M88E1512_E_PHY_ID) {
+                       ret_val = igb_initialize_M88E1512_phy(hw);
+                       if (ret_val)
+                               goto out;
+               }
                break;
        case IGP03E1000_E_PHY_ID:
                phy->type = e1000_phy_igp_3;
@@ -897,6 +903,7 @@ out:
  **/
 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 {
+       struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
 
        /* This isn't a true "hard" reset, but is the only reset
@@ -913,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
                goto out;
 
        ret_val = igb_phy_sw_reset(hw);
+       if (ret_val)
+               goto out;
 
+       if (phy->id == M88E1512_E_PHY_ID)
+               ret_val = igb_initialize_M88E1512_phy(hw);
 out:
        return ret_val;
 }
@@ -1587,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
                case I347AT4_E_PHY_ID:
                case M88E1112_E_PHY_ID:
                case M88E1543_E_PHY_ID:
+               case M88E1512_E_PHY_ID:
                case I210_I_PHY_ID:
                        ret_val = igb_copper_link_setup_m88_gen2(hw);
                        break;
@@ -2629,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
        u16 phy_data;
 
        if ((hw->phy.media_type != e1000_media_type_copper) ||
-           (phy->id != M88E1543_E_PHY_ID))
+           ((phy->id != M88E1543_E_PHY_ID) &&
+            (phy->id != M88E1512_E_PHY_ID)))
                goto out;
 
        if (!hw->dev_spec._82575.eee_disable) {
@@ -2709,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
 
        /* Check if EEE is supported on this device. */
        if ((hw->phy.media_type != e1000_media_type_copper) ||
-           (phy->id != M88E1543_E_PHY_ID))
+           ((phy->id != M88E1543_E_PHY_ID) &&
+            (phy->id != M88E1512_E_PHY_ID)))
                goto out;
 
        ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
index f8684aa285be8cac987263db6676f9d1076b5f9b..b1915043bc0cfefbe416b6bf5ca59658a8f47278 100644 (file)
 #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT   7
 #define E1000_M88E1112_PAGE_ADDR               0x16
 #define E1000_M88E1112_STATUS                  0x01
+#define E1000_M88E1512_CFG_REG_1               0x0010
+#define E1000_M88E1512_CFG_REG_2               0x0011
+#define E1000_M88E1512_CFG_REG_3               0x0007
+#define E1000_M88E1512_MODE                    0x0014
 
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define M88_VENDOR           0x0141
 #define I210_I_PHY_ID        0x01410C00
 #define M88E1543_E_PHY_ID    0x01410EA0
+#define M88E1512_E_PHY_ID    0x01410DD0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
index 987c9de247645a2d0ec1992d65703bf22a0daeb2..23ec28f43f6d3d354094655c7c696c5f7e3bfb1b 100644 (file)
@@ -1262,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
                        switch (hw->phy.id) {
                        case I347AT4_E_PHY_ID:
                        case M88E1112_E_PHY_ID:
+                       case M88E1543_E_PHY_ID:
+                       case M88E1512_E_PHY_ID:
                        case I210_I_PHY_ID:
                                reset_dsp = false;
                                break;
@@ -1270,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
                                        reset_dsp = false;
                                break;
                        }
-                       if (!reset_dsp)
+                       if (!reset_dsp) {
                                hw_dbg("Link taking longer than expected.\n");
-                       else {
+                       } else {
                                /* We didn't get link.
                                 * Reset the DSP and cross our fingers.
                                 */
@@ -1297,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
        if (hw->phy.type != e1000_phy_m88 ||
            hw->phy.id == I347AT4_E_PHY_ID ||
            hw->phy.id == M88E1112_E_PHY_ID ||
+           hw->phy.id == M88E1543_E_PHY_ID ||
+           hw->phy.id == M88E1512_E_PHY_ID ||
            hw->phy.id == I210_I_PHY_ID)
                goto out;
 
@@ -1737,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
                phy->cable_length = phy_data / (is_cm ? 100 : 1);
                break;
        case M88E1543_E_PHY_ID:
+       case M88E1512_E_PHY_ID:
        case I347AT4_E_PHY_ID:
                /* Remember the original page select and set it to 7 */
                ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -2188,6 +2193,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
        return 0;
 }
 
+/**
+ *  igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize Marvell M88E1512 PHY to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = 0;
+
+       /* Switch to PHY page 0xFF. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 0xFB. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 0x12. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+       if (ret_val)
+               goto out;
+
+       /* Change mode to SGMII-to-Copper */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+       if (ret_val)
+               goto out;
+
+       /* Return the PHY to page 0. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+       if (ret_val)
+               goto out;
+
+       ret_val = igb_phy_sw_reset(hw);
+       if (ret_val) {
+               hw_dbg("Error committing the PHY changes\n");
+               return ret_val;
+       }
+
+       /* Let the PHY settle after the soft reset. */
+       usleep_range(1000, 2000);
+out:
+       return ret_val;
+}
+
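The routine above is a straight-line run of paged register writes, each followed by the identical error check. As a reading aid, the same sequence can be expressed as a data table; the following is a hedged userspace sketch, where write_reg() is a stub standing in for phy->ops.write_reg() and the PAGE_SELECT/CFG_REG_*/MODE_REG enumerators are illustrative stand-ins for E1000_M88E1543_PAGE_ADDR and the E1000_M88E1512_* constants (real register offsets deliberately not asserted):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for E1000_M88E1543_PAGE_ADDR and E1000_M88E1512_*. */
	enum reg { PAGE_SELECT, CFG_REG_1, CFG_REG_2, CFG_REG_3, MODE_REG };

	struct step {
		enum reg reg;
		uint16_t val;
	};

	/* Same order and values as igb_initialize_M88E1512_phy() above. */
	static const struct step m88e1512_seq[] = {
		{ PAGE_SELECT, 0x00FF },
		{ CFG_REG_2, 0x214B }, { CFG_REG_1, 0x2144 },
		{ CFG_REG_2, 0x0C28 }, { CFG_REG_1, 0x2146 },
		{ CFG_REG_2, 0xB233 }, { CFG_REG_1, 0x214D },
		{ CFG_REG_2, 0xCC0C }, { CFG_REG_1, 0x2159 },
		{ PAGE_SELECT, 0x00FB }, { CFG_REG_3, 0x000D },
		{ PAGE_SELECT, 0x0012 }, { MODE_REG, 0x8001 }, /* SGMII-to-copper */
		{ PAGE_SELECT, 0x0000 },                       /* back to page 0 */
	};

	/* Stub for phy->ops.write_reg(); always succeeds here. */
	static int write_reg(enum reg reg, uint16_t val)
	{
		printf("reg %d <- 0x%04X\n", (int)reg, val);
		return 0;
	}

	static int init_m88e1512(void)
	{
		size_t i;

		for (i = 0; i < sizeof(m88e1512_seq) / sizeof(m88e1512_seq[0]); i++) {
			int ret = write_reg(m88e1512_seq[i].reg, m88e1512_seq[i].val);

			if (ret)
				return ret;	/* mirrors the goto-out pattern */
		}
		return 0;	/* the soft reset and settle delay still follow */
	}

The table form makes the page layout visible: four CFG_REG_2/CFG_REG_1 value pairs on page 0xFF, one write on page 0xFB, the SGMII-to-copper mode switch on page 0x12, and a return to page 0 before igb_phy_sw_reset() commits the lot.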
 /**
  * igb_power_up_phy_copper - Restore copper link in case of PHY power down
  * @hw: pointer to the HW structure
index 7af4ffab0285653c4c400992edcd163782f919ac..24d55edbb0e3a8b58290f94e2c34f3708d8c0111 100644 (file)
@@ -61,6 +61,7 @@ s32  igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
 void igb_power_up_phy_copper(struct e1000_hw *hw);
 void igb_power_down_phy_copper(struct e1000_hw *hw);
 s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_initialize_M88E1512_phy(struct e1000_hw *hw);
 s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
index 6f0490d0e981c487ce812545f0dbb834f83fe1d9..4af2870e49f88aaa67559e6d454328eb1ff1d6db 100644 (file)
 #define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
 #define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
 #define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0   0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1   0x0B658 /* Frequency Out 1 Control Register - RW */
 #define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
 #define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
 #define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
index c2bd4f98a8376ecab82b99623ce2a9454ed148a9..212d668dabb382160ae04dfce3e6b1375b44832d 100644 (file)
@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
                         struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
 #ifdef CONFIG_IGB_HWMON
 void igb_sysfs_exit(struct igb_adapter *adapter);
 int igb_sysfs_init(struct igb_adapter *adapter);
index b7b9c670bb3c7e24db5ff9d8e3437af06c27d317..74262768b09b75bd7ad267afb67e720a617bce10 100644 (file)
@@ -3008,6 +3008,7 @@ static int igb_set_channels(struct net_device *netdev,
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        unsigned int count = ch->combined_count;
+       unsigned int max_combined = 0;
 
        /* Verify they are not requesting separate vectors */
        if (!count || ch->rx_count || ch->tx_count)
@@ -3018,11 +3019,13 @@ static int igb_set_channels(struct net_device *netdev,
                return -EINVAL;
 
        /* Verify the number of channels doesn't exceed hw limits */
-       if (count > igb_max_channels(adapter))
+       max_combined = igb_max_channels(adapter);
+       if (count > max_combined)
                return -EINVAL;
 
        if (count != adapter->rss_queues) {
                adapter->rss_queues = count;
+               igb_set_flag_queue_pairs(adapter, max_combined);
 
                /* Hardware has to reinitialize queues and interrupts to
                 * match the new configuration.
index 41e27404689648a4bad220e174db32cdf1077580..1902ef8f4a0b30e1f509d130a1c2786632d646d4 100644 (file)
@@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
 #endif
 
 #ifdef CONFIG_PM
@@ -1205,10 +1207,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
 
        /* allocate q_vector and rings */
        q_vector = adapter->q_vector[v_idx];
-       if (!q_vector)
+       if (!q_vector) {
                q_vector = kzalloc(size, GFP_KERNEL);
-       else
+       } else if (size > ksize(q_vector)) {
+               kfree_rcu(q_vector, rcu);
+               q_vector = kzalloc(size, GFP_KERNEL);
+       } else {
                memset(q_vector, 0, size);
+       }
        if (!q_vector)
                return -ENOMEM;
 
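The q_vector hunk above turns a blind memset() reuse into a reuse-or-reallocate decision: when the vector now needs more room than the old allocation provides (ksize() reports the usable size), the stale object is released through kfree_rcu(), since readers on the hot path may still hold the pointer, and a fresh one is allocated. A minimal userspace sketch of the same decision, tracking capacity explicitly instead of calling ksize() (buf and cap are illustrative names, not driver fields):

	#include <stdlib.h>
	#include <string.h>

	/* Reuse the old buffer when it is big enough; otherwise drop it
	 * and allocate fresh. The driver defers the free with kfree_rcu()
	 * because concurrent readers may still dereference the old object. */
	static void *get_scratch(void **buf, size_t *cap, size_t size)
	{
		if (*buf && size > *cap) {
			free(*buf);		/* kfree_rcu() in the driver */
			*buf = NULL;
		}
		if (!*buf) {
			*buf = calloc(1, size);	/* kzalloc() equivalent */
			if (*buf)
				*cap = size;
		} else {
			memset(*buf, 0, size);	/* big enough: just rezero */
		}
		return *buf;
	}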
@@ -2645,7 +2651,11 @@ err_eeprom:
        if (hw->flash_address)
                iounmap(hw->flash_address);
 err_sw_init:
+       kfree(adapter->shadow_vfta);
        igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+       igb_disable_sriov(pdev);
+#endif
        pci_iounmap(pdev, hw->hw_addr);
 err_ioremap:
        free_netdev(netdev);
@@ -2805,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev)
         */
        igb_release_hw_control(adapter);
 
-       unregister_netdev(netdev);
-
-       igb_clear_interrupt_scheme(adapter);
-
 #ifdef CONFIG_PCI_IOV
        igb_disable_sriov(pdev);
 #endif
 
+       unregister_netdev(netdev);
+
+       igb_clear_interrupt_scheme(adapter);
+
        pci_iounmap(pdev, hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
@@ -2847,7 +2857,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
                return;
 
        pci_sriov_set_totalvfs(pdev, 7);
-       igb_pci_enable_sriov(pdev, max_vfs);
+       igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2888,6 +2898,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
 
        adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
 
+       igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+                             const u32 max_rss_queues)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
        /* Determine if we need to pair queues. */
        switch (hw->mac.type) {
        case e1000_82575:
@@ -2968,6 +2986,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
        }
 #endif /* CONFIG_PCI_IOV */
 
+       igb_probe_vfs(adapter);
+
        igb_init_queue_configuration(adapter);
 
        /* Setup and initialize a copy of the hw vlan table array */
@@ -2980,8 +3000,6 @@ static int igb_sw_init(struct igb_adapter *adapter)
                return -ENOMEM;
        }
 
-       igb_probe_vfs(adapter);
-
        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);
 
@@ -7401,6 +7419,7 @@ static int igb_resume(struct device *dev)
 
        if (igb_init_interrupt_scheme(adapter, true)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               rtnl_unlock();
                return -ENOMEM;
        }
 
@@ -7494,6 +7513,7 @@ static int igb_sriov_reinit(struct pci_dev *dev)
        igb_init_queue_configuration(adapter);
 
        if (igb_init_interrupt_scheme(adapter, true)) {
+               rtnl_unlock();
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
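Both rtnl_unlock() additions above close the same leak: in igb_resume() and igb_sriov_reinit() the RTNL lock is already held when igb_init_interrupt_scheme() fails, and the -ENOMEM early return used to bail out with the lock still taken, wedging every later rtnetlink operation. The rule being restored, sketched with a pthread mutex standing in for RTNL and a hypothetical alloc_ok flag:

	#include <errno.h>
	#include <pthread.h>

	/* Stand-in for the global RTNL lock. */
	static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

	static int reinit_queues(int alloc_ok)
	{
		int err = 0;

		pthread_mutex_lock(&cfg_lock);
		if (!alloc_ok) {
			err = -ENOMEM;	/* failure must still reach the unlock */
			goto out;
		}
		/* ... reconfigure queues and interrupts under the lock ... */
	out:
		pthread_mutex_unlock(&cfg_lock);
		return err;
	}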
index c3a9392cbc192229f4178c913fad8ab64d8c44c3..5982f28d521a2c116d49ba4cda22d8520c0cb1dc 100644 (file)
@@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
        wr32(E1000_CTRL_EXT, ctrl_ext);
 }
 
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
 {
        static const u32 aux0_sel_sdp[IGB_N_SDP] = {
                AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
@@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
                TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
                TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
        };
+       static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+               TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+               TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+       };
+       static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+               TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+               TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+       };
        static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
                TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
                TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
@@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
                tssdp &= ~AUX1_TS_SDP_EN;
 
        tssdp &= ~ts_sdp_sel_clr[pin];
-       if (chan == 1)
-               tssdp |= ts_sdp_sel_tt1[pin];
-       else
-               tssdp |= ts_sdp_sel_tt0[pin];
-
+       if (freq) {
+               if (chan == 1)
+                       tssdp |= ts_sdp_sel_fc1[pin];
+               else
+                       tssdp |= ts_sdp_sel_fc0[pin];
+       } else {
+               if (chan == 1)
+                       tssdp |= ts_sdp_sel_tt1[pin];
+               else
+                       tssdp |= ts_sdp_sel_tt0[pin];
+       }
        tssdp |= ts_sdp_en[pin];
 
        wr32(E1000_TSSDP, tssdp);
@@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
        struct igb_adapter *igb =
                container_of(ptp, struct igb_adapter, ptp_caps);
        struct e1000_hw *hw = &igb->hw;
-       u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+       u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
        unsigned long flags;
        struct timespec ts;
-       int pin = -1;
+       int use_freq = 0, pin = -1;
        s64 ns;
 
        switch (rq->type) {
@@ -511,40 +525,58 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                ts.tv_nsec = rq->perout.period.nsec;
                ns = timespec_to_ns(&ts);
                ns = ns >> 1;
-               if (on && ns < 500000LL) {
-                       /* 2k interrupts per second is an awful lot. */
-                       return -EINVAL;
+               if (on && ns <= 70000000LL) {
+                       if (ns < 8LL)
+                               return -EINVAL;
+                       use_freq = 1;
                }
                ts = ns_to_timespec(ns);
                if (rq->perout.index == 1) {
-                       tsauxc_mask = TSAUXC_EN_TT1;
-                       tsim_mask = TSINTR_TT1;
+                       if (use_freq) {
+                               tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+                               tsim_mask = 0;
+                       } else {
+                               tsauxc_mask = TSAUXC_EN_TT1;
+                               tsim_mask = TSINTR_TT1;
+                       }
                        trgttiml = E1000_TRGTTIML1;
                        trgttimh = E1000_TRGTTIMH1;
+                       freqout = E1000_FREQOUT1;
                } else {
-                       tsauxc_mask = TSAUXC_EN_TT0;
-                       tsim_mask = TSINTR_TT0;
+                       if (use_freq) {
+                               tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+                               tsim_mask = 0;
+                       } else {
+                               tsauxc_mask = TSAUXC_EN_TT0;
+                               tsim_mask = TSINTR_TT0;
+                       }
                        trgttiml = E1000_TRGTTIML0;
                        trgttimh = E1000_TRGTTIMH0;
+                       freqout = E1000_FREQOUT0;
                }
                spin_lock_irqsave(&igb->tmreg_lock, flags);
                tsauxc = rd32(E1000_TSAUXC);
                tsim = rd32(E1000_TSIM);
+               if (rq->perout.index == 1) {
+                       tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+                       tsim &= ~TSINTR_TT1;
+               } else {
+                       tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+                       tsim &= ~TSINTR_TT0;
+               }
                if (on) {
                        int i = rq->perout.index;
-
-                       igb_pin_perout(igb, i, pin);
+                       igb_pin_perout(igb, i, pin, use_freq);
                        igb->perout[i].start.tv_sec = rq->perout.start.sec;
                        igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
                        igb->perout[i].period.tv_sec = ts.tv_sec;
                        igb->perout[i].period.tv_nsec = ts.tv_nsec;
                        wr32(trgttimh, rq->perout.start.sec);
                        wr32(trgttiml, rq->perout.start.nsec);
+                       if (use_freq)
+                               wr32(freqout, ns);
                        tsauxc |= tsauxc_mask;
                        tsim |= tsim_mask;
-               } else {
-                       tsauxc &= ~tsauxc_mask;
-                       tsim &= ~tsim_mask;
                }
                wr32(E1000_TSAUXC, tsauxc);
                wr32(E1000_TSIM, tsim);
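The perout rework above stops rejecting short periods outright. Previously any period under 1 ms was refused because every output edge cost a target-time interrupt; now any period whose half (one toggle of the pin) fits in 70 ms is routed to the FREQOUT0/FREQOUT1 clock-output hardware and generates no interrupts at all, down to the 8 ns half-period floor. A sketch of just that mode decision, mirroring the logic visible in the hunk:

	#include <errno.h>
	#include <stdint.h>

	/* Pick hardware clock-out (FREQOUTx, interrupt-free) or the
	 * interrupt-driven target-time mode for a periodic output. */
	static int choose_perout_mode(int64_t period_ns, int *use_freq)
	{
		int64_t half = period_ns >> 1;	/* pin toggles twice per period */

		if (half <= 70000000LL) {	/* half-period up to 70 ms */
			if (half < 8LL)
				return -EINVAL;	/* below the hardware floor */
			*use_freq = 1;		/* program half into FREQOUTx */
		} else {
			*use_freq = 0;		/* re-arm TRGTTIMx per edge */
		}
		return 0;
	}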
index 95af14e139d769254e8b3e20982b37a5888ebbba..686fa7184179a473599584f25e0a6d714baff54f 100644 (file)
@@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
                                         adapter->rx_ps_hdr_size,
                                         DMA_FROM_DEVICE);
+                       buffer_info->dma = 0;
                        skb_put(skb, hlen);
                }
 
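The single added line in the igbvf receive path is a use-after-unmap guard: buffer_info->dma doubles as a "currently mapped" flag, so leaving the stale handle in place risks a second dma_unmap_single() on the same address when the buffer is revisited. The invariant, with illustrative struct and helper names:

	/* Invariant: a nonzero dma handle means "currently mapped". */
	struct buf_info {
		unsigned long dma;
	};

	static void release_mapping(struct buf_info *bi)
	{
		if (!bi->dma)
			return;		/* already unmapped: nothing to do */
		/* dma_unmap_single(dev, bi->dma, len, DMA_FROM_DEVICE); */
		bi->dma = 0;		/* forget the stale handle */
	}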
index 3e6a9319c7185b52a4571cbbab61aa9dd54c422c..7906234c51642d60b4d79cfa4dfc48ade9ae353c 100644 (file)
@@ -248,8 +248,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        struct pci_dev *pdev;
 
-       /* determine whether to use the the parent device
-        */
+       /* determine whether to use the parent device */
        if (ixgbe_pcie_from_parent(&adapter->hw))
                pdev = adapter->pdev->bus->parent->self;
        else
index b6f424f3b1a8388da48c6038faae16922e773e7a..4615a949381d9ec525124e588820fab7d54d0a4e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -3462,14 +3462,14 @@ struct ixgbe_info {
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P)       ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)        ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P)    ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P)   ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P)                ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P)       ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)        ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P)    ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P)   ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P)                ((P) ? 0x9A00 : 0x5A00)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B           (1 << 9)
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS         (1 << 11)
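The IXGBE_KRM_* rewrite fixes two macro pitfalls in one pass, shown here on a toy BASE() macro: the parameter was used inside a comparison without its own parentheses, and the (P == 0) test was redundant for a 0/1 port index anyway.

	/* Unsafe: BASE_BAD(port & 1) expands to ((port & 1 == 0) ? ...),
	 * which parses as (port & (1 == 0)), so the condition is always
	 * false and the macro always yields 0x8000. */
	#define BASE_BAD(P)	((P == 0) ? (0x4000) : (0x8000))

	/* Safe: parameter wrapped, redundant comparison dropped. */
	#define BASE_GOOD(P)	((P) ? 0x8000 : 0x4000)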
index 3e8b1bfb1f2e316212bd9b60fa06522ca4dc68db..d9884fd15b453e2486177d58b7fc40bcd5aaf7cc 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 
 /* Coalescing */
 #define MVPP2_TXDONE_COAL_PKTS_THRESH  15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
 #define MVPP2_RX_COAL_PKTS             32
 #define MVPP2_RX_COAL_USEC             100
 
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
        u64     tx_bytes;
 };
 
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+       struct hrtimer tx_done_timer;
+       bool timer_scheduled;
+       /* Tasklet for egress finalization */
+       struct tasklet_struct tx_done_tasklet;
+};
+
 struct mvpp2_port {
        u8 id;
 
@@ -679,6 +690,9 @@ struct mvpp2_port {
        u32 pending_cause_rx;
        struct napi_struct napi;
 
+       /* Per-CPU port control */
+       struct mvpp2_port_pcpu __percpu *pcpu;
+
        /* Flags */
        unsigned long flags;
 
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
        /* Array of transmitted skb */
        struct sk_buff **tx_skb;
 
+       /* Array of transmitted buffers' physical addresses */
+       dma_addr_t *tx_buffs;
+
        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;
 
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
        /* Occupied buffers indicator */
        atomic_t in_use;
        int in_use_thresh;
-
-       spinlock_t lock;
 };
 
 struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 }
 
 static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
-                             struct sk_buff *skb)
+                             struct sk_buff *skb,
+                             struct mvpp2_tx_desc *tx_desc)
 {
        txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+       if (skb)
+               txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+                                                        tx_desc->buf_phys_addr;
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
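mvpp2_txq_inc_put() now snapshots the descriptor's buf_phys_addr into a parallel per-CPU array at enqueue time. The payoff shows up in the mvpp2_txq_bufs_free() hunk further below: by the time completion runs, the descriptor ring slot may already have been recycled by a later transmit, so the unmap must not re-read it. The parallel-array bookkeeping, with illustrative field names:

	#include <stdint.h>

	struct tx_ring {
		void     **skb;		/* like tx_skb[]   */
		uint64_t  *dma;		/* like tx_buffs[] */
		int        put;
		int        size;
	};

	/* Record the skb and, for slots that own a DMA mapping, its
	 * address, side by side; completion later reads dma[] instead of
	 * the (possibly recycled) hardware descriptor. */
	static void ring_put(struct tx_ring *r, void *skb, uint64_t dma)
	{
		r->skb[r->put] = skb;
		if (skb)
			r->dma[r->put] = dma;
		if (++r->put == r->size)
			r->put = 0;	/* wrap around */
	}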
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
        bm_pool->pkt_size = 0;
        bm_pool->buf_num = 0;
        atomic_set(&bm_pool->in_use, 0);
-       spin_lock_init(&bm_pool->lock);
 
        return 0;
 }
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                  int pkt_size)
 {
-       unsigned long flags = 0;
        struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
        int num;
 
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                return NULL;
        }
 
-       spin_lock_irqsave(&new_pool->lock, flags);
-
        if (new_pool->type == MVPP2_BM_FREE)
                new_pool->type = type;
 
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                if (num != pkts_num) {
                        WARN(1, "pool %d: %d of %d allocated\n",
                             new_pool->id, num, pkts_num);
-                       /* We need to undo the bufs_add() allocations */
-                       spin_unlock_irqrestore(&new_pool->lock, flags);
                        return NULL;
                }
        }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
        mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
                                  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
 
-       spin_unlock_irqrestore(&new_pool->lock, flags);
-
        return new_pool;
 }
 
 /* Initialize pools for swf */
 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 {
-       unsigned long flags = 0;
        int rxq;
 
        if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                if (!port->pool_long)
                        return -ENOMEM;
 
-               spin_lock_irqsave(&port->pool_long->lock, flags);
                port->pool_long->port_map |= (1 << port->id);
-               spin_unlock_irqrestore(&port->pool_long->lock, flags);
 
                for (rxq = 0; rxq < rxq_number; rxq++)
                        mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                if (!port->pool_short)
                        return -ENOMEM;
 
-               spin_lock_irqsave(&port->pool_short->lock, flags);
                port->pool_short->port_map |= (1 << port->id);
-               spin_unlock_irqrestore(&port->pool_short->lock, flags);
 
                for (rxq = 0; rxq < rxq_number; rxq++)
                        mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
 
        mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
                    (MVPP2_CAUSE_MISC_SUM_MASK |
-                    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
                     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
 
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
        rxq->time_coal = usec;
 }
 
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
-       struct mvpp2_port *port = arg;
-       int queue;
-       u32 val;
-
-       for (queue = 0; queue < txq_number; queue++) {
-               struct mvpp2_tx_queue *txq = port->txqs[queue];
-
-               val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
-                      MVPP2_TRANSMITTED_THRESH_MASK;
-               mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-               mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
-       }
-}
-
 /* Free Tx queue skbuffs */
 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                                struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
        int i;
 
        for (i = 0; i < num; i++) {
-               struct mvpp2_tx_desc *tx_desc = txq->descs +
-                                                       txq_pcpu->txq_get_index;
+               dma_addr_t buf_phys_addr =
+                                   txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
                struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
 
                mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                if (!skb)
                        continue;
 
-               dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
-                                tx_desc->data_size, DMA_TO_DEVICE);
+               dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+                                skb_headlen(skb), DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
 }
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
                                                        u32 cause)
 {
-       int queue = fls(cause >> 16) - 1;
+       int queue = fls(cause) - 1;
 
        return port->txqs[queue];
 }
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                        netif_tx_wake_queue(nq);
 }
 
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+       struct mvpp2_tx_queue *txq;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       unsigned int tx_todo = 0;
+
+       while (cause) {
+               txq = mvpp2_get_tx_queue(port, cause);
+               if (!txq)
+                       break;
+
+               txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+               if (txq_pcpu->count) {
+                       mvpp2_txq_done(port, txq, txq_pcpu);
+                       tx_todo += txq_pcpu->count;
+               }
+
+               cause &= ~(1 << txq->log_id);
+       }
+       return tx_todo;
+}
+
 /* Rx/Tx queue initialization/cleanup methods */
 
 /* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
                                           sizeof(*txq_pcpu->tx_skb),
                                           GFP_KERNEL);
-               if (!txq_pcpu->tx_skb) {
-                       dma_free_coherent(port->dev->dev.parent,
-                                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                         txq->descs, txq->descs_phys);
-                       return -ENOMEM;
-               }
+               if (!txq_pcpu->tx_skb)
+                       goto error;
+
+               txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+                                            sizeof(dma_addr_t), GFP_KERNEL);
+               if (!txq_pcpu->tx_buffs)
+                       goto error;
 
                txq_pcpu->count = 0;
                txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        }
 
        return 0;
+
+error:
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               kfree(txq_pcpu->tx_skb);
+               kfree(txq_pcpu->tx_buffs);
+       }
+
+       dma_free_coherent(port->dev->dev.parent,
+                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                         txq->descs, txq->descs_phys);
+
+       return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                kfree(txq_pcpu->tx_skb);
+               kfree(txq_pcpu->tx_buffs);
        }
 
        if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
                        goto err_cleanup;
        }
 
-       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
        return 0;
 
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
        }
 }
 
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+       ktime_t interval;
+
+       if (!port_pcpu->timer_scheduled) {
+               port_pcpu->timer_scheduled = true;
+               interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+               hrtimer_start(&port_pcpu->tx_done_timer, interval,
+                             HRTIMER_MODE_REL_PINNED);
+       }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+       struct net_device *dev = (struct net_device *)data;
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+       unsigned int tx_todo, cause;
+
+       if (!netif_running(dev))
+               return;
+       port_pcpu->timer_scheduled = false;
+
+       /* Process all the Tx queues */
+       cause = (1 << txq_number) - 1;
+       tx_todo = mvpp2_tx_done(port, cause);
+
+       /* Set the timer in case not all the packets were processed */
+       if (tx_todo)
+               mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+       struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+                                                        struct mvpp2_port_pcpu,
+                                                        tx_done_timer);
+
+       tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+       return HRTIMER_NORESTART;
+}
+
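The three helpers above replace interrupt-driven TX completion with deferred polling: mvpp2_timer_set() arms a one-shot hrtimer (about 1 ms, per MVPP2_TXDONE_HRTIMER_PERIOD_NS) at most once, the hrtimer callback runs in hard-irq context and therefore only schedules a tasklet, and the tasklet reaps finished descriptors and re-arms the timer while work remains. A runnable control-flow sketch with logging stubs in place of hrtimer_start() and tasklet_schedule():

	#include <stdbool.h>
	#include <stdio.h>

	struct pcpu_state {
		bool timer_scheduled;
		unsigned int pending;	/* descriptors awaiting cleanup */
	};

	/* Stub for hrtimer_start(): arm the one-shot at most once. */
	static void arm_timer(struct pcpu_state *s)
	{
		if (!s->timer_scheduled) {
			s->timer_scheduled = true;
			puts("timer armed");
		}
	}

	/* Stub for the real work: pretend one descriptor completes. */
	static unsigned int reap_completed(struct pcpu_state *s)
	{
		return s->pending ? s->pending - 1 : 0;
	}

	/* Tasklet body: do the actual reaping, re-arm if work remains. */
	static void bottom_half(struct pcpu_state *s)
	{
		s->timer_scheduled = false;
		s->pending = reap_completed(s);
		if (s->pending)
			arm_timer(s);
	}

	/* hrtimer callback: hard-irq context, so only kick the tasklet. */
	static void timer_cb(struct pcpu_state *s)
	{
		bottom_half(s);		/* tasklet_schedule() in the driver */
	}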
 /* Main RX/TX processing routines */
 
 /* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
                if (i == (skb_shinfo(skb)->nr_frags - 1)) {
                        /* Last descriptor */
                        tx_desc->command = MVPP2_TXD_L_DESC;
-                       mvpp2_txq_inc_put(txq_pcpu, skb);
+                       mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        tx_desc->command = 0;
-                       mvpp2_txq_inc_put(txq_pcpu, NULL);
+                       mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
                }
        }
 
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                /* First and Last descriptor */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
                tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, skb);
+               mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
        } else {
                /* First but not Last */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
                tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, NULL);
+               mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
 
                /* Continue with other skb fragments */
                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
                dev_kfree_skb_any(skb);
        }
 
+       /* Finalize TX processing */
+       if (txq_pcpu->count >= txq->done_pkts_coal)
+               mvpp2_txq_done(port, txq, txq_pcpu);
+
+       /* Set the timer in case not all frags were processed */
+       if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+               struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+               mvpp2_timer_set(port_pcpu);
+       }
+
        return NETDEV_TX_OK;
 }
 
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
                netdev_err(dev, "tx fifo underrun error\n");
 }
 
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
 {
-       struct mvpp2_port *port = arg;
-       u32 cause_rx_tx, cause_tx, cause_misc;
+       u32 cause_rx_tx, cause_rx, cause_misc;
+       int rx_done = 0;
+       struct mvpp2_port *port = netdev_priv(napi->dev);
 
        /* Rx/Tx cause register
         *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
         */
        cause_rx_tx = mvpp2_read(port->priv,
                                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+       cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
        if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
                            cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
-       /* Release TX descriptors */
-       if (cause_tx) {
-               struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
-               struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
-               if (txq_pcpu->count)
-                       mvpp2_txq_done(port, txq, txq_pcpu);
-       }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
-       u32 cause_rx_tx, cause_rx;
-       int rx_done = 0;
-       struct mvpp2_port *port = netdev_priv(napi->dev);
-
-       on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
-       cause_rx_tx = mvpp2_read(port->priv,
-                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
 
        /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
 static int mvpp2_stop(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port_pcpu *port_pcpu;
+       int cpu;
 
        mvpp2_stop_dev(port);
        mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
        on_each_cpu(mvpp2_interrupts_mask, port, 1);
 
        free_irq(port->irq, port);
+       for_each_present_cpu(cpu) {
+               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+               hrtimer_cancel(&port_pcpu->tx_done_timer);
+               port_pcpu->timer_scheduled = false;
+               tasklet_kill(&port_pcpu->tx_done_tasklet);
+       }
        mvpp2_cleanup_rxqs(port);
        mvpp2_cleanup_txqs(port);
 
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
                txq->done_pkts_coal = c->tx_max_coalesced_frames;
        }
 
-       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
        return 0;
 }
 
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 {
        struct device_node *phy_node;
        struct mvpp2_port *port;
+       struct mvpp2_port_pcpu *port_pcpu;
        struct net_device *dev;
        struct resource *res;
        const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        int features;
        int phy_mode;
        int priv_common_regs_num = 2;
-       int err, i;
+       int err, i, cpu;
 
        dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
                                 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        }
        mvpp2_port_power_up(port);
 
+       port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+       if (!port->pcpu) {
+               err = -ENOMEM;
+               goto err_free_txq_pcpu;
+       }
+
+       for_each_present_cpu(cpu) {
+               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+               hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_REL_PINNED);
+               port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+               port_pcpu->timer_scheduled = false;
+
+               tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+                            (unsigned long)dev);
+       }
+
        netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
        features = NETIF_F_SG | NETIF_F_IP_CSUM;
        dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register netdev\n");
-               goto err_free_txq_pcpu;
+               goto err_free_port_pcpu;
        }
        netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        priv->port_list[id] = port;
        return 0;
 
+err_free_port_pcpu:
+       free_percpu(port->pcpu);
 err_free_txq_pcpu:
        for (i = 0; i < txq_number; i++)
                free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
        int i;
 
        unregister_netdev(port->dev);
+       free_percpu(port->pcpu);
        free_percpu(port->stats);
        for (i = 0; i < txq_number; i++)
                free_percpu(port->txqs[i]->pcpu);
index 45f6dc75c0df99c9d7a8f92c976bd014bdad24f1..27ca4596775af4b29666b323b9e45e42378041f5 100644 (file)
 
 #define MLX5E_MAX_NUM_TC       8
 
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
 
+#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
+#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 #define MLX5E_SQ_BF_BUDGET             16
@@ -92,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
        "lro_bytes",
        "rx_csum_good",
        "rx_csum_none",
+       "rx_csum_sw",
        "tx_csum_offload",
        "tx_queue_stopped",
        "tx_queue_wake",
@@ -129,18 +132,94 @@ struct mlx5e_vport_stats {
        u64 lro_bytes;
        u64 rx_csum_good;
        u64 rx_csum_none;
+       u64 rx_csum_sw;
        u64 tx_csum_offload;
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
        u64 rx_wqe_err;
 
-#define NUM_VPORT_COUNTERS     31
+#define NUM_VPORT_COUNTERS     32
+};
+
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+       /* IEEE802.3 counters */
+       "frames_tx",
+       "frames_rx",
+       "check_seq_err",
+       "alignment_err",
+       "octets_tx",
+       "octets_received",
+       "multicast_xmitted",
+       "broadcast_xmitted",
+       "multicast_rx",
+       "broadcast_rx",
+       "in_range_len_errors",
+       "out_of_range_len",
+       "too_long_errors",
+       "symbol_err",
+       "mac_control_tx",
+       "mac_control_rx",
+       "unsupported_op_rx",
+       "pause_ctrl_rx",
+       "pause_ctrl_tx",
+
+       /* RFC2863 counters */
+       "in_octets",
+       "in_ucast_pkts",
+       "in_discards",
+       "in_errors",
+       "in_unknown_protos",
+       "out_octets",
+       "out_ucast_pkts",
+       "out_discards",
+       "out_errors",
+       "in_multicast_pkts",
+       "in_broadcast_pkts",
+       "out_multicast_pkts",
+       "out_broadcast_pkts",
+
+       /* RFC2819 counters */
+       "drop_events",
+       "octets",
+       "pkts",
+       "broadcast_pkts",
+       "multicast_pkts",
+       "crc_align_errors",
+       "undersize_pkts",
+       "oversize_pkts",
+       "fragments",
+       "jabbers",
+       "collisions",
+       "p64octets",
+       "p65to127octets",
+       "p128to255octets",
+       "p256to511octets",
+       "p512to1023octets",
+       "p1024to1518octets",
+       "p1519to2047octets",
+       "p2048to4095octets",
+       "p4096to8191octets",
+       "p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS                19
+#define NUM_RFC_2863_COUNTERS          13
+#define NUM_RFC_2819_COUNTERS          21
+#define NUM_PPORT_COUNTERS             (NUM_IEEE_802_3_COUNTERS + \
+                                        NUM_RFC_2863_COUNTERS + \
+                                        NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+       __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+       __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+       __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
 };
 
 static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
        "packets",
        "csum_none",
+       "csum_sw",
        "lro_packets",
        "lro_bytes",
        "wqe_err"
@@ -149,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
 struct mlx5e_rq_stats {
        u64 packets;
        u64 csum_none;
+       u64 csum_sw;
        u64 lro_packets;
        u64 lro_bytes;
        u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
 };
 
 static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -180,6 +260,7 @@ struct mlx5e_sq_stats {
 
 struct mlx5e_stats {
        struct mlx5e_vport_stats   vport;
+       struct mlx5e_pport_stats   pport;
 };
 
 struct mlx5e_params {
@@ -193,11 +274,12 @@ struct mlx5e_params {
        u16 tx_cq_moderation_usec;
        u16 tx_cq_moderation_pkts;
        u16 min_rx_wqes;
-       u16 rx_hash_log_tbl_sz;
        bool lro_en;
        u32 lro_wqe_sz;
-       u8  rss_hfunc;
        u16 tx_max_inline;
+       u8  rss_hfunc;
+       u8  toeplitz_hash_key[40];
+       u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 };
 
 enum {
@@ -217,6 +299,7 @@ struct mlx5e_cq {
        struct napi_struct        *napi;
        struct mlx5_core_cq        mcq;
        struct mlx5e_channel      *channel;
+       struct mlx5e_priv         *priv;
 
        /* control */
        struct mlx5_wq_ctrl        wq_ctrl;
@@ -240,6 +323,7 @@ struct mlx5e_rq {
        struct mlx5_wq_ctrl    wq_ctrl;
        u32                    rqn;
        struct mlx5e_channel  *channel;
+       struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_tx_skb_cb {
@@ -344,10 +428,10 @@ enum mlx5e_traffic_types {
        MLX5E_NUM_TT,
 };
 
-enum {
-       MLX5E_RQT_SPREADING  = 0,
-       MLX5E_RQT_DEFAULT_RQ = 1,
-       MLX5E_NUM_RQT        = 2,
+enum mlx5e_rqt_ix {
+       MLX5E_INDIRECTION_RQT,
+       MLX5E_SINGLE_RQ_RQT,
+       MLX5E_NUM_RQT,
 };
 
 struct mlx5e_eth_addr_info {
@@ -372,10 +456,10 @@ struct mlx5e_eth_addr_db {
 enum {
        MLX5E_STATE_ASYNC_EVENTS_ENABLE,
        MLX5E_STATE_OPENED,
+       MLX5E_STATE_DESTROYING,
 };
 
 struct mlx5e_vlan_db {
-       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        u32           active_vlans_ft_ix[VLAN_N_VID];
        u32           untagged_rule_ft_ix;
        u32           any_vlan_rule_ft_ix;
@@ -399,10 +483,11 @@ struct mlx5e_priv {
        u32                        pdn;
        u32                        tdn;
        struct mlx5_core_mr        mr;
+       struct mlx5e_rq            drop_rq;
 
        struct mlx5e_channel     **channel;
        u32                        tisn[MLX5E_MAX_NUM_TC];
-       u32                        rqtn;
+       u32                        rqtn[MLX5E_NUM_RQT];
        u32                        tirn[MLX5E_NUM_TT];
 
        struct mlx5e_flow_table    ft;
@@ -479,10 +564,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -491,8 +575,8 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
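The en.h changes size the device around a 128-entry indirection RQT (MLX5E_LOG_INDIR_RQT_SIZE is 0x7) with the channel count capped at half of that, and they move the 40-byte Toeplitz key and the indirection table into struct mlx5e_params so ethtool can read and rewrite both. How the default table is filled is not shown in this hunk; a plausible sketch, assuming the common round-robin spread over the active channels, is:

	#include <stdint.h>

	#define LOG_INDIR_RQT_SIZE	7
	#define INDIR_RQT_SIZE		(1u << LOG_INDIR_RQT_SIZE) /* 128 */

	/* Spread the RQT slots evenly across the active channels; the RSS
	 * hash of each incoming flow indexes this table to pick a queue. */
	static void fill_default_indir(uint32_t *tbl, unsigned int num_channels)
	{
		unsigned int i;

		for (i = 0; i < INDIR_RQT_SIZE; i++)
			tbl[i] = i % num_channels;
	}

Together with the get_rxfh_key_size/get_rxfh_indir_size hooks added further below, this lets ethtool -x dump the table and key, and ethtool -X rewrite the table without reopening the interface.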
index b95aa3384c367cda65fd6a875553cad0c8638d69..bce912688ca821e5b17f6781c16a0cec713913ef 100644 (file)
@@ -171,7 +171,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 
        switch (sset) {
        case ETH_SS_STATS:
-               return NUM_VPORT_COUNTERS +
+               return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
                       priv->params.num_channels * NUM_RQ_STATS +
                       priv->params.num_channels * priv->params.num_tc *
                                                   NUM_SQ_STATS;
@@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev,
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               vport_strings[i]);
 
+               /* PPORT counters */
+               for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+                       strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                              pport_strings[i]);
+
                /* per channel counters */
                for (i = 0; i < priv->params.num_channels; i++)
                        for (j = 0; j < NUM_RQ_STATS; j++)
@@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                data[idx++] = ((u64 *)&priv->stats.vport)[i];
 
+       for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+               data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
        /* per channel counters */
        for (i = 0; i < priv->params.num_channels; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
@@ -620,7 +628,7 @@ static int mlx5e_set_settings(struct net_device *netdev,
        u32 link_modes;
        u32 speed;
        u32 eth_proto_cap, eth_proto_admin;
-       u8 port_status;
+       enum mlx5_port_status ps;
        int err;
 
        speed = ethtool_cmd_speed(cmd);
@@ -654,33 +662,42 @@ static int mlx5e_set_settings(struct net_device *netdev,
        if (link_modes == eth_proto_admin)
                goto out;
 
-       err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
-       if (err) {
-               netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
-                          __func__, err);
-               goto out;
-       }
-
-       err = mlx5_query_port_status(mdev, &port_status);
-       if (err)
-               goto out;
+       mlx5_query_port_admin_status(mdev, &ps);
+       if (ps == MLX5_PORT_UP)
+               mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+       mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+       if (ps == MLX5_PORT_UP)
+               mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
 
-       if (port_status == MLX5_PORT_DOWN)
-               return 0;
-
-       err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
-       if (err)
-               goto out;
-       err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
 out:
        return err;
 }
 
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return MLX5E_INDIR_RQT_SIZE;
+}
+
 static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                          u8 *hfunc)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
+       if (indir)
+               memcpy(indir, priv->params.indirection_rqt,
+                      sizeof(priv->params.indirection_rqt));
+
+       if (key)
+               memcpy(key, priv->params.toeplitz_hash_key,
+                      sizeof(priv->params.toeplitz_hash_key));
+
        if (hfunc)
                *hfunc = priv->params.rss_hfunc;
 
@@ -691,28 +708,60 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                          const u8 *key, const u8 hfunc)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       bool close_open;
        int err = 0;
 
-       if (hfunc == ETH_RSS_HASH_NO_CHANGE)
-               return 0;
-
-       if ((hfunc != ETH_RSS_HASH_XOR) &&
+       if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+           (hfunc != ETH_RSS_HASH_XOR) &&
            (hfunc != ETH_RSS_HASH_TOP))
                return -EINVAL;
 
        mutex_lock(&priv->state_lock);
 
-       priv->params.rss_hfunc = hfunc;
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-               mlx5e_close_locked(dev);
-               err = mlx5e_open_locked(dev);
+       if (indir) {
+               memcpy(priv->params.indirection_rqt, indir,
+                      sizeof(priv->params.indirection_rqt));
+               mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        }
 
+       close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+                    test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (close_open)
+               mlx5e_close_locked(dev);
+
+       if (key)
+               memcpy(priv->params.toeplitz_hash_key, key,
+                      sizeof(priv->params.toeplitz_hash_key));
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+               priv->params.rss_hfunc = hfunc;
+
+       if (close_open)
+               err = mlx5e_open_locked(priv->netdev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
 }
 
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+                          struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = priv->params.num_channels;
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
 static int mlx5e_get_tunable(struct net_device *dev,
                             const struct ethtool_tunable *tuna,
                             void *data)
@@ -771,6 +820,42 @@ static int mlx5e_set_tunable(struct net_device *dev,
        return err;
 }
 
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+                                struct ethtool_pauseparam *pauseparam)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
+
+       err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+                                   &pauseparam->tx_pause);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+                          __func__, err);
+       }
+}
+
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+                               struct ethtool_pauseparam *pauseparam)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
+
+       if (pauseparam->autoneg)
+               return -EINVAL;
+
+       err = mlx5_set_port_pause(mdev,
+                                 pauseparam->rx_pause ? 1 : 0,
+                                 pauseparam->tx_pause ? 1 : 0);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+                          __func__, err);
+       }
+
+       return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_drvinfo       = mlx5e_get_drvinfo,
        .get_link          = ethtool_op_get_link,
@@ -785,8 +870,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
        .set_coalesce      = mlx5e_set_coalesce,
        .get_settings      = mlx5e_get_settings,
        .set_settings      = mlx5e_set_settings,
+       .get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
+       .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
        .get_rxfh          = mlx5e_get_rxfh,
        .set_rxfh          = mlx5e_set_rxfh,
+       .get_rxnfc         = mlx5e_get_rxnfc,
        .get_tunable       = mlx5e_get_tunable,
        .set_tunable       = mlx5e_set_tunable,
+       .get_pauseparam    = mlx5e_get_pauseparam,
+       .set_pauseparam    = mlx5e_set_pauseparam,
 };
index 70ec31b9e1e96b135829430df45a416444a21a11..e71563ce05d1bc34123fd4aa63348d569adf4c57 100644 (file)
@@ -594,44 +594,28 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
-       WARN_ON(!mutex_is_locked(&priv->state_lock));
+       if (!priv->vlan.filter_disabled)
+               return;
 
-       if (priv->vlan.filter_disabled) {
-               priv->vlan.filter_disabled = false;
-               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                           0);
-       }
+       priv->vlan.filter_disabled = false;
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 {
-       WARN_ON(!mutex_is_locked(&priv->state_lock));
+       if (priv->vlan.filter_disabled)
+               return;
 
-       if (!priv->vlan.filter_disabled) {
-               priv->vlan.filter_disabled = true;
-               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                           0);
-       }
+       priv->vlan.filter_disabled = true;
+       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int err = 0;
-
-       mutex_lock(&priv->state_lock);
-
-       set_bit(vid, priv->vlan.active_vlans);
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-                                         vid);
 
-       mutex_unlock(&priv->state_lock);
-
-       return err;
+       return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 }
 
 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -639,56 +623,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       mutex_lock(&priv->state_lock);
-
-       clear_bit(vid, priv->vlan.active_vlans);
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
-       mutex_unlock(&priv->state_lock);
-
-       return 0;
-}
-
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
-       u16 vid;
-       int err;
-
-       for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
-               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-                                         vid);
-               if (err)
-                       return err;
-       }
-
-       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-       if (err)
-               return err;
-
-       if (priv->vlan.filter_disabled) {
-               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                         0);
-               if (err)
-                       return err;
-       }
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 
        return 0;
 }
 
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
-       u16 vid;
-
-       if (priv->vlan.filter_disabled)
-               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
-       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
-       for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
-               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -752,18 +691,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
        mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;
 
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+       if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                mlx5e_sync_netdev_addr(priv);
 
        mlx5e_apply_netdev_addr(priv);
 }
 
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
 {
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              set_rx_mode_work);
+
        struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
        struct net_device *ndev = priv->netdev;
 
-       bool rx_mode_enable   = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
        bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
        bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;
@@ -796,17 +738,6 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
        ea->broadcast_enabled = broadcast_enabled;
 }
 
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
-       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-                                              set_rx_mode_work);
-
-       mutex_lock(&priv->state_lock);
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-               mlx5e_set_rx_mode_core(priv);
-       mutex_unlock(&priv->state_lock);
-}
-
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
 {
        ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -929,7 +860,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
        mlx5_destroy_flow_table(priv->ft.vlan);
 }
 
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
 {
        int err;
 
@@ -941,16 +872,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_main_flow_table;
 
+       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+       if (err)
+               goto err_destroy_vlan_flow_table;
+
        return 0;
 
+err_destroy_vlan_flow_table:
+       mlx5e_destroy_vlan_flow_table(priv);
+
 err_destroy_main_flow_table:
        mlx5e_destroy_main_flow_table(priv);
 
        return err;
 }
 
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
 {
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
        mlx5e_destroy_vlan_flow_table(priv);
        mlx5e_destroy_main_flow_table(priv);
 }
index bb815893d3a8b40cb22f0a938b14487b028a9f56..55166dd5b4ea5a16f77c91b6a2dc6cfb9a88b77e 100644 (file)
@@ -82,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
        mutex_unlock(&priv->state_lock);
 }
 
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_pport_stats *s = &priv->stats.pport;
+       u32 *in;
+       u32 *out;
+       int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+       in  = mlx5_vzalloc(sz);
+       out = mlx5_vzalloc(sz);
+       if (!in || !out)
+               goto free_out;
+
+       MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out,
+                            sz, MLX5_REG_PPCNT, 0, 0);
+       memcpy(s->IEEE_802_3_counters,
+              MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+              sizeof(s->IEEE_802_3_counters));
+
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out,
+                            sz, MLX5_REG_PPCNT, 0, 0);
+       memcpy(s->RFC_2863_counters,
+              MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+              sizeof(s->RFC_2863_counters));
+
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out,
+                            sz, MLX5_REG_PPCNT, 0, 0);
+       memcpy(s->RFC_2819_counters,
+              MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+              sizeof(s->RFC_2819_counters));
+
+free_out:
+       kvfree(in);
+       kvfree(out);
+}
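
The three counter groups above are read with an identical pattern; a hypothetical helper that factors it out (read_ppcnt_group() is not part of the driver, and error handling is omitted for brevity):

	/* Hypothetical helper: query one PPCNT counter group and copy its
	 * counter set into dst. Assumes in/out are zeroed buffers of
	 * MLX5_ST_SZ_BYTES(ppcnt_reg) bytes with local_port already set.
	 */
	static void read_ppcnt_group(struct mlx5_core_dev *mdev, u32 *in, u32 *out,
				     int sz, int group, void *dst, size_t len)
	{
		MLX5_SET(ppcnt_reg, in, grp, group);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
		memcpy(dst, MLX5_ADDR_OF(ppcnt_reg, out, counter_set), len);
	}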
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
@@ -108,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
        s->lro_packets          = 0;
        s->lro_bytes            = 0;
        s->rx_csum_none         = 0;
+       s->rx_csum_sw           = 0;
        s->rx_wqe_err           = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;
@@ -115,6 +157,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
+               s->rx_csum_sw   += rq_stats->csum_sw;
                s->rx_wqe_err   += rq_stats->wqe_err;
 
                for (j = 0; j < priv->params.num_tc; j++) {
@@ -200,8 +243,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 
        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
-       s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
+                              s->rx_csum_sw;
 
+       mlx5e_update_pport_counters(priv);
 free_out:
        kvfree(out);
 }
@@ -307,6 +352,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        rq->netdev  = c->netdev;
        rq->channel = c;
        rq->ix      = c->ix;
+       rq->priv    = c->priv;
 
        return 0;
 
@@ -324,8 +370,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 
 static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 {
-       struct mlx5e_channel *c = rq->channel;
-       struct mlx5e_priv *priv = c->priv;
+       struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
 
        void *in;
@@ -392,11 +437,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
 
 static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 {
-       struct mlx5e_channel *c = rq->channel;
-       struct mlx5e_priv *priv = c->priv;
-       struct mlx5_core_dev *mdev = priv->mdev;
-
-       mlx5_core_destroy_rq(mdev, rq->rqn);
+       mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
 }
 
 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -740,6 +781,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
        }
 
        cq->channel = c;
+       cq->priv = priv;
 
        return 0;
 }
@@ -751,8 +793,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 
 static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 {
-       struct mlx5e_channel *c = cq->channel;
-       struct mlx5e_priv *priv = c->priv;
+       struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
 
@@ -798,8 +839,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
 static void mlx5e_disable_cq(struct mlx5e_cq *cq)
 {
-       struct mlx5e_channel *c = cq->channel;
-       struct mlx5e_priv *priv = c->priv;
+       struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
 
        mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -1119,112 +1159,418 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
        kfree(priv->channel);
 }
 
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       u32 in[MLX5_ST_SZ_DW(create_tis_in)];
-       void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+       return (hfunc == ETH_RSS_HASH_TOP) ?
+              MLX5_RX_HASH_FN_TOEPLITZ :
+              MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
 
-       memset(in, 0, sizeof(in));
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+       int inv = 0;
+       int i;
 
-       MLX5_SET(tisc, tisc, prio,  tc);
-       MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+       for (i = 0; i < size; i++)
+               inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
 
-       return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+       return inv;
 }
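
A standalone illustration of the bit reversal (userspace sketch; the shift-based body is assumed equivalent to the test_bit() form above):

	#include <stdio.h>

	static int bits_invert(unsigned long a, int size)
	{
		int inv = 0;
		int i;

		for (i = 0; i < size; i++)
			inv |= ((a >> (size - i - 1)) & 1) << i;

		return inv;
	}

	int main(void)
	{
		/* 4-bit examples: 0b0001 -> 0b1000, 0b0011 -> 0b1100 */
		printf("%x %x\n", bits_invert(0x1, 4), bits_invert(0x3, 4));
		return 0;
	}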
 
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
 {
-       mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+       int i;
+
+       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+               int ix = i;
+
+               if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+                       ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+               ix = priv->params.indirection_rqt[ix];
+               ix = ix % priv->params.num_channels;
+               MLX5_SET(rqtc, rqtc, rq_num[i],
+                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                        priv->channel[ix]->rq.rqn :
+                        priv->drop_rq.rqn);
+       }
 }
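
Put differently, each RQT slot resolves as in this sketch (rqt_slot_rqn() is a hypothetical name restating the logic above; the MLX5E_SINGLE_RQ_RQT case below applies the same open/drop fallback to channel 0):

	/* A slot points at a channel RQ while the netdev is open, and at
	 * the drop RQ otherwise, so the RQT always holds a valid RQ number.
	 */
	static u32 rqt_slot_rqn(struct mlx5e_priv *priv, int slot)
	{
		int ix = slot;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(slot, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix] % priv->params.num_channels;

		return test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		       priv->channel[ix]->rq.rqn : priv->drop_rq.rqn;
	}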
 
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+                               enum mlx5e_rqt_ix rqt_ix)
 {
-       int err;
-       int tc;
 
-       for (tc = 0; tc < priv->params.num_tc; tc++) {
-               err = mlx5e_open_tis(priv, tc);
-               if (err)
-                       goto err_close_tises;
+       switch (rqt_ix) {
+       case MLX5E_INDIRECTION_RQT:
+               mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+
+               break;
+
+       default: /* MLX5E_SINGLE_RQ_RQT */
+               MLX5_SET(rqtc, rqtc, rq_num[0],
+                        test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+                        priv->channel[0]->rq.rqn :
+                        priv->drop_rq.rqn);
+
+               break;
        }
+}
 
-       return 0;
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 *in;
+       void *rqtc;
+       int inlen;
+       int sz;
+       int err;
 
-err_close_tises:
-       for (tc--; tc >= 0; tc--)
-               mlx5e_close_tis(priv, tc);
+       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+
+       inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+       MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+       MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+       mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+
+       err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+
+       kvfree(in);
 
        return err;
 }
 
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
-       int tc;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 *in;
+       void *rqtc;
+       int inlen;
+       int sz;
+       int err;
 
-       for (tc = 0; tc < priv->params.num_tc; tc++)
-               mlx5e_close_tis(priv, tc);
+       sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+
+       MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+
+       mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+
+       MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+
+       err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+
+       kvfree(in);
+
+       return err;
 }
 
-static int mlx5e_rx_hash_fn(int hfunc)
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
-       return (hfunc == ETH_RSS_HASH_TOP) ?
-              MLX5_RX_HASH_FN_TOEPLITZ :
-              MLX5_RX_HASH_FN_INVERTED_XOR8;
+       mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
 }
 
-static int mlx5e_bits_invert(unsigned long a, int size)
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
 {
-       int inv = 0;
-       int i;
+       mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+       mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
 
-       for (i = 0; i < size; i++)
-               inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+       if (!priv->params.lro_en)
+               return;
 
-       return inv;
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+       MLX5_SET(tirc, tirc, lro_enable_mask,
+                MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+                MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+       MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+                (priv->params.lro_wqe_sz -
+                 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+       MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+                MLX5_CAP_ETH(priv->mdev,
+                             lro_timer_supported_periods[2]));
 }
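
The lro_max_ip_payload_size field is programmed in 256-byte units (hence the >> 8). A worked example, assuming lro_wqe_sz is 64 KiB:

	/* Assumed value for illustration: lro_wqe_sz = 64 * 1024.
	 * lro_max_ip_payload_size = (65536 - 256) >> 8 = 255 units of 256 B,
	 * i.e. the HW may coalesce roughly 65 KB of IP payload per session,
	 * leaving ROUGH_MAX_L2_L3_HDR_SZ of headroom for L2/L3 headers.
	 */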
 
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       u32 *in;
-       void *rqtc;
+
+       void *in;
+       void *tirc;
        int inlen;
        int err;
-       int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
-       int sz = 1 << log_tbl_sz;
-       int i;
 
-       inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+       inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;
 
-       rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+       MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+       tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
 
-       MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-       MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+       mlx5e_build_tir_ctx_lro(tirc, priv);
 
-       for (i = 0; i < sz; i++) {
-               int ix = i;
+       err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
 
-               if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
-                       ix = mlx5e_bits_invert(i, log_tbl_sz);
+       kvfree(in);
 
-               ix = ix % priv->params.num_channels;
-               MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+       return err;
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int hw_mtu;
+       int err;
+
+       err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+       if (err)
+               return err;
+
+       mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+       if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+               netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+                           __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+       netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+       return 0;
+}
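
For context, the SW-to-HW MTU conversion macros are assumed to account for L2 framing overhead, along these lines (illustrative only; the exact definitions live elsewhere in the driver):

	/* Assumed shape of the conversion macros: the hardware MTU counts
	 * the whole frame, the netdev MTU excludes L2 framing, so the two
	 * differ by the Ethernet header, a VLAN tag, and the FCS.
	 */
	#define MLX5E_SW2HW_MTU(mtu)   ((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
	#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)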
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int num_txqs;
+       int err;
+
+       set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+       num_txqs = priv->params.num_channels * priv->params.num_tc;
+       netif_set_real_num_tx_queues(netdev, num_txqs);
+       netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+       err = mlx5e_set_dev_port_mtu(netdev);
+       if (err)
+               return err;
+
+       err = mlx5e_open_channels(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+                          __func__, err);
+               return err;
        }
 
-       err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn);
+       mlx5e_update_carrier(priv);
+       mlx5e_redirect_rqts(priv);
 
-       kvfree(in);
+       schedule_delayed_work(&priv->update_stats_work, 0);
+
+       return 0;
+}
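
Note the ordering here, which the RQT fill logic above relies on:

	/* MLX5E_STATE_OPENED is set before mlx5e_redirect_rqts(), so
	 * mlx5e_fill_rqt_rqns() repoints the RQTs from the drop RQ to the
	 * freshly opened channel RQs; mlx5e_close_locked() clears the bit
	 * first, steering traffic back to the drop RQ before the channels
	 * are torn down.
	 */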
+
+static int mlx5e_open(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_open_locked(netdev);
+       mutex_unlock(&priv->state_lock);
 
        return err;
 }
 
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+int mlx5e_close_locked(struct net_device *netdev)
 {
-       mlx5_core_destroy_rqt(priv->mdev, priv->rqtn);
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+       mlx5e_redirect_rqts(priv);
+       netif_carrier_off(priv->netdev);
+       mlx5e_close_channels(priv);
+
+       return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_close_locked(netdev);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+                               struct mlx5e_rq *rq,
+                               struct mlx5e_rq_param *param)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       void *rqc = param->rqc;
+       void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+       int err;
+
+       param->wq.db_numa_node = param->wq.buf_numa_node;
+
+       err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+                               &rq->wq_ctrl);
+       if (err)
+               return err;
+
+       rq->priv = priv;
+
+       return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+                               struct mlx5e_cq *cq,
+                               struct mlx5e_cq_param *param)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_cq *mcq = &cq->mcq;
+       int eqn_not_used;
+       int irqn;
+       int err;
+
+       err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+                              &cq->wq_ctrl);
+       if (err)
+               return err;
+
+       mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+       mcq->cqe_sz     = 64;
+       mcq->set_ci_db  = cq->wq_ctrl.db.db;
+       mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+       *mcq->set_ci_db = 0;
+       *mcq->arm_db    = 0;
+       mcq->vector     = param->eq_ix;
+       mcq->comp       = mlx5e_completion_event;
+       mcq->event      = mlx5e_cq_error_event;
+       mcq->irqn       = irqn;
+       mcq->uar        = &priv->cq_uar;
+
+       cq->priv = priv;
+
+       return 0;
+}
+
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+       struct mlx5e_cq_param cq_param;
+       struct mlx5e_rq_param rq_param;
+       struct mlx5e_rq *rq = &priv->drop_rq;
+       struct mlx5e_cq *cq = &priv->drop_rq.cq;
+       int err;
+
+       memset(&cq_param, 0, sizeof(cq_param));
+       memset(&rq_param, 0, sizeof(rq_param));
+       mlx5e_build_rx_cq_param(priv, &cq_param);
+       mlx5e_build_rq_param(priv, &rq_param);
+
+       err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_cq(cq, &cq_param);
+       if (err)
+               goto err_destroy_cq;
+
+       err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+       if (err)
+               goto err_disable_cq;
+
+       err = mlx5e_enable_rq(rq, &rq_param);
+       if (err)
+               goto err_destroy_rq;
+
+       return 0;
+
+err_destroy_rq:
+       mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+       mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+       mlx5e_destroy_cq(&priv->drop_rq.cq);
+
+       return err;
+}
+
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+       mlx5e_disable_rq(&priv->drop_rq);
+       mlx5e_destroy_rq(&priv->drop_rq);
+       mlx5e_disable_cq(&priv->drop_rq.cq);
+       mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
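
	/* Teardown mirrors mlx5e_open_drop_rq() in reverse: the RQ is
	 * disabled and destroyed before its CQ, matching the error-unwind
	 * labels above.
	 */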
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+       void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(tisc, tisc, prio,  tc);
+       MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+       return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+       mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+       int err;
+       int tc;
+
+       for (tc = 0; tc < priv->params.num_tc; tc++) {
+               err = mlx5e_create_tis(priv, tc);
+               if (err)
+                       goto err_close_tises;
+       }
+
+       return 0;
+
+err_close_tises:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_destroy_tis(priv, tc);
+
+       return err;
+}
+
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+       int tc;
+
+       for (tc = 0; tc < priv->params.num_tc; tc++)
+               mlx5e_destroy_tis(priv, tc);
 }
 
 static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
@@ -1233,8 +1579,6 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 
        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
 
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP)
 
@@ -1247,30 +1591,19 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
-       if (priv->params.lro_en) {
-               MLX5_SET(tirc, tirc, lro_enable_mask,
-                        MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-                        MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
-               MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-                        (priv->params.lro_wqe_sz -
-                         ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-               MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
-                        MLX5_CAP_ETH(priv->mdev,
-                                     lro_timer_supported_periods[3]));
-       }
+       mlx5e_build_tir_ctx_lro(tirc, priv);
+
+       MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 
        switch (tt) {
        case MLX5E_TT_ANY:
-               MLX5_SET(tirc, tirc, disp_type,
-                        MLX5_TIRC_DISP_TYPE_DIRECT);
-               MLX5_SET(tirc, tirc, inline_rqn,
-                        priv->channel[0]->rq.rqn);
+               MLX5_SET(tirc, tirc, indirect_table,
+                        priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+               MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
                break;
        default:
-               MLX5_SET(tirc, tirc, disp_type,
-                        MLX5_TIRC_DISP_TYPE_INDIRECT);
                MLX5_SET(tirc, tirc, indirect_table,
-                        priv->rqtn);
+                        priv->rqtn[MLX5E_INDIRECTION_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn,
                         mlx5e_rx_hash_fn(priv->params.rss_hfunc));
                if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -1280,7 +1613,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                                                       rx_hash_toeplitz_key);
 
                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-                       netdev_rss_key_fill(rss_key, len);
+                       memcpy(rss_key, priv->params.toeplitz_hash_key, len);
                }
                break;
        }
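
With this change the Toeplitz key is generated once, by the netdev_rss_key_fill() call in mlx5e_build_netdev_priv() further down, and copied into every TIR here, so the RSS hash stays stable when TIRs are rebuilt or modified.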
@@ -1366,7 +1699,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
        }
 }
 
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
@@ -1390,184 +1723,37 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
        return err;
 }
 
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
 {
        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
 }
 
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
 {
        int err;
        int i;
 
        for (i = 0; i < MLX5E_NUM_TT; i++) {
-               err = mlx5e_open_tir(priv, i);
+               err = mlx5e_create_tir(priv, i);
                if (err)
-                       goto err_close_tirs;
+                       goto err_destroy_tirs;
        }
 
        return 0;
 
-err_close_tirs:
+err_destroy_tirs:
        for (i--; i >= 0; i--)
-               mlx5e_close_tir(priv, i);
+               mlx5e_destroy_tir(priv, i);
 
        return err;
 }
 
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
 {
        int i;
 
        for (i = 0; i < MLX5E_NUM_TT; i++)
-               mlx5e_close_tir(priv, i);
-}
-
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
-{
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5_core_dev *mdev = priv->mdev;
-       int hw_mtu;
-       int err;
-
-       err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
-       if (err)
-               return err;
-
-       mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
-
-       if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-               netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-                           __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
-
-       netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
-       return 0;
-}
-
-int mlx5e_open_locked(struct net_device *netdev)
-{
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       int num_txqs;
-       int err;
-
-       num_txqs = priv->params.num_channels * priv->params.num_tc;
-       netif_set_real_num_tx_queues(netdev, num_txqs);
-       netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
-
-       err = mlx5e_set_dev_port_mtu(netdev);
-       if (err)
-               return err;
-
-       err = mlx5e_open_tises(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
-                          __func__, err);
-               return err;
-       }
-
-       err = mlx5e_open_channels(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
-                          __func__, err);
-               goto err_close_tises;
-       }
-
-       err = mlx5e_open_rqt(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
-                          __func__, err);
-               goto err_close_channels;
-       }
-
-       err = mlx5e_open_tirs(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
-                          __func__, err);
-               goto err_close_rqls;
-       }
-
-       err = mlx5e_open_flow_table(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
-                          __func__, err);
-               goto err_close_tirs;
-       }
-
-       err = mlx5e_add_all_vlan_rules(priv);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
-                          __func__, err);
-               goto err_close_flow_table;
-       }
-
-       mlx5e_init_eth_addr(priv);
-
-       set_bit(MLX5E_STATE_OPENED, &priv->state);
-
-       mlx5e_update_carrier(priv);
-       mlx5e_set_rx_mode_core(priv);
-
-       schedule_delayed_work(&priv->update_stats_work, 0);
-       return 0;
-
-err_close_flow_table:
-       mlx5e_close_flow_table(priv);
-
-err_close_tirs:
-       mlx5e_close_tirs(priv);
-
-err_close_rqls:
-       mlx5e_close_rqt(priv);
-
-err_close_channels:
-       mlx5e_close_channels(priv);
-
-err_close_tises:
-       mlx5e_close_tises(priv);
-
-       return err;
-}
-
-static int mlx5e_open(struct net_device *netdev)
-{
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       int err;
-
-       mutex_lock(&priv->state_lock);
-       err = mlx5e_open_locked(netdev);
-       mutex_unlock(&priv->state_lock);
-
-       return err;
-}
-
-int mlx5e_close_locked(struct net_device *netdev)
-{
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-
-       clear_bit(MLX5E_STATE_OPENED, &priv->state);
-
-       mlx5e_set_rx_mode_core(priv);
-       mlx5e_del_all_vlan_rules(priv);
-       netif_carrier_off(priv->netdev);
-       mlx5e_close_flow_table(priv);
-       mlx5e_close_tirs(priv);
-       mlx5e_close_rqt(priv);
-       mlx5e_close_channels(priv);
-       mlx5e_close_tises(priv);
-
-       return 0;
-}
-
-static int mlx5e_close(struct net_device *netdev)
-{
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       int err;
-
-       mutex_lock(&priv->state_lock);
-       err = mlx5e_close_locked(netdev);
-       mutex_unlock(&priv->state_lock);
-
-       return err;
+               mlx5e_destroy_tir(priv, i);
 }
 
 static struct rtnl_link_stats64 *
@@ -1631,11 +1817,15 @@ static int mlx5e_set_features(struct net_device *netdev,
                        mlx5e_close_locked(priv->netdev);
 
                priv->params.lro_en = !!(features & NETIF_F_LRO);
+               mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+               mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
 
                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
        }
 
+       mutex_unlock(&priv->state_lock);
+
        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        mlx5e_enable_vlan_filter(priv);
@@ -1643,8 +1833,6 @@ static int mlx5e_set_features(struct net_device *netdev,
                        mlx5e_disable_vlan_filter(priv);
        }
 
-       mutex_unlock(&priv->state_lock);
-
        return 0;
 }
 
@@ -1725,9 +1913,10 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
 
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
-                                   int num_comp_vectors)
+                                   int num_channels)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       int i;
 
        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1744,22 +1933,22 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
-       priv->params.rx_hash_log_tbl_sz    =
-               (order_base_2(num_comp_vectors) >
-                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
-               order_base_2(num_comp_vectors)           :
-               MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
-       priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+       netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+                           sizeof(priv->params.toeplitz_hash_key));
+
+       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+               priv->params.indirection_rqt[i] = i % num_channels;
+
        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 
        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
-       priv->params.num_channels          = num_comp_vectors;
+       priv->params.num_channels          = num_channels;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
        spin_lock_init(&priv->async_events_spinlock);
@@ -1848,19 +2037,20 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 {
        struct net_device *netdev;
        struct mlx5e_priv *priv;
-       int ncv = mdev->priv.eq_table.num_comp_vectors;
+       int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+                       MLX5E_MAX_NUM_CHANNELS);
        int err;
 
        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;
 
-       netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+       netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }
 
-       mlx5e_build_netdev_priv(mdev, netdev, ncv);
+       mlx5e_build_netdev_priv(mdev, netdev, nch);
        mlx5e_build_netdev(netdev);
 
        netif_carrier_off(netdev);
@@ -1891,16 +2081,73 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_dealloc_transport_domain;
        }
 
+       err = mlx5e_create_tises(priv);
+       if (err) {
+               mlx5_core_warn(mdev, "create tises failed, %d\n", err);
+               goto err_destroy_mkey;
+       }
+
+       err = mlx5e_open_drop_rq(priv);
+       if (err) {
+               mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+               goto err_destroy_tises;
+       }
+
+       err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+       if (err) {
+               mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+               goto err_close_drop_rq;
+       }
+
+       err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+       if (err) {
+               mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+               goto err_destroy_rqt_indir;
+       }
+
+       err = mlx5e_create_tirs(priv);
+       if (err) {
+               mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+               goto err_destroy_rqt_single;
+       }
+
+       err = mlx5e_create_flow_tables(priv);
+       if (err) {
+               mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+               goto err_destroy_tirs;
+       }
+
+       mlx5e_init_eth_addr(priv);
+
        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
-               goto err_destroy_mkey;
+               goto err_destroy_flow_tables;
        }
 
        mlx5e_enable_async_events(priv);
+       schedule_work(&priv->set_rx_mode_work);
 
        return priv;
 
+err_destroy_flow_tables:
+       mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+       mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+       mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+       mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+       mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+       mlx5e_destroy_tises(priv);
+
 err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);
 
@@ -1924,13 +2171,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;
 
+       set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+       schedule_work(&priv->set_rx_mode_work);
+       mlx5e_disable_async_events(priv);
+       flush_scheduled_work();
        unregister_netdev(netdev);
+       mlx5e_destroy_flow_tables(priv);
+       mlx5e_destroy_tirs(priv);
+       mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+       mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+       mlx5e_close_drop_rq(priv);
+       mlx5e_destroy_tises(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-       mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
        free_netdev(netdev);
 }
 
index 9a9374131f5b45e7c740f80e2ef8b0e9ba175fa9..cf0098596e85847ad558896cc8ec4731b0420192 100644 (file)
@@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        }
 
        if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+       __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+       return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+                                    struct mlx5_cqe64 *cqe,
+                                    struct mlx5e_rq *rq,
+                                    struct sk_buff *skb)
+{
+       if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+               goto csum_none;
+
+       if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else if (is_first_ethertype_ip(skb)) {
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+               rq->stats.csum_sw++;
+       } else {
+               goto csum_none;
+       }
+
+       return;
+
+csum_none:
+       skb->ip_summed = CHECKSUM_NONE;
+       rq->stats.csum_none++;
+}
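
The ladder above can be summarized as a small decision function (userspace sketch with hypothetical names):

	/* Hardware-verified L4 wins, a software-completed checksum over a
	 * plain IPv4/IPv6 frame is second best, anything else is left to
	 * the stack as CHECKSUM_NONE.
	 */
	enum csum { CSUM_UNNECESSARY, CSUM_COMPLETE, CSUM_NONE };

	static enum csum classify_csum(int rxcsum_on, int l4_ok,
				       int first_ethertype_ip)
	{
		if (!rxcsum_on)
			return CSUM_NONE;
		if (l4_ok)
			return CSUM_UNNECESSARY;
		if (first_ethertype_ip)
			return CSUM_COMPLETE;
		return CSUM_NONE;
	}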
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
@@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe);
-               skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+               skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }
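
A worked example with hypothetical numbers:

	/* A session that coalesced lro_num_seg = 7 segments into
	 * cqe_bcnt = 9000 bytes now reports
	 * gso_size = DIV_ROUND_UP(9000, 7) = 1286, matching the average
	 * segment actually received instead of a fixed WQE-sized value.
	 */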
 
-       if (likely(netdev->features & NETIF_F_RXCSUM) &&
-           (cqe->hds_ip_ext & CQE_L2_OK) &&
-           (cqe->hds_ip_ext & CQE_L3_OK) &&
-           (cqe->hds_ip_ext & CQE_L4_OK)) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               skb->ip_summed = CHECKSUM_NONE;
-               rq->stats.csum_none++;
-       }
+       mlx5e_handle_csum(netdev, cqe, rq, skb);
 
        skb->protocol = eth_type_trans(skb, netdev);
 
index 603a8b0908eea74a39bb88d9736d200d8acc3573..03aabdd79abe77f3227630260044c734d8f17ad4 100644 (file)
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+       MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
        err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
index 70147999f6574f9fc1e236b6b43ce0610cbd4d3b..821caaab9bfb04697fb0424cb8498bdc9eacabed 100644 (file)
@@ -216,22 +216,25 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
 
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-                        enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+                              enum mlx5_port_status status)
 {
        u32 in[MLX5_ST_SZ_DW(paos_reg)];
        u32 out[MLX5_ST_SZ_DW(paos_reg)];
 
        memset(in, 0, sizeof(in));
 
+       MLX5_SET(paos_reg, in, local_port, 1);
        MLX5_SET(paos_reg, in, admin_status, status);
        MLX5_SET(paos_reg, in, ase, 1);
 
        return mlx5_core_access_reg(dev, in, sizeof(in), out,
                                    sizeof(out), MLX5_REG_PAOS, 0, 1);
 }
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
 
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+                                enum mlx5_port_status *status)
 {
        u32 in[MLX5_ST_SZ_DW(paos_reg)];
        u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +242,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
 
        memset(in, 0, sizeof(in));
 
+       MLX5_SET(paos_reg, in, local_port, 1);
+
        err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                   sizeof(out), MLX5_REG_PAOS, 0, 0);
        if (err)
                return err;
 
-       *status = MLX5_GET(paos_reg, out, oper_status);
+       *status = MLX5_GET(paos_reg, out, admin_status);
        return err;
 }
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
 static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
                                int *max_mtu, int *oper_mtu, u8 port)
@@ -328,3 +334,45 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+       u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+       u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pfcc_reg, in, local_port, 1);
+       MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+       MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PFCC, 0, 1);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+                         u32 *rx_pause, u32 *tx_pause)
+{
+       u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+       u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pfcc_reg, in, local_port, 1);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PFCC, 0, 0);
+       if (err)
+               return err;
+
+       if (rx_pause)
+               *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+       if (tx_pause)
+               *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
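
A hypothetical caller (for example an ethtool get/set_pauseparam hook), given a struct mlx5_core_dev *mdev, would use the new exports like this:

	/* Illustrative use of the exported pause helpers: */
	u32 rx_pause, tx_pause;
	int err;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
	if (!err && (!rx_pause || !tx_pause))
		err = mlx5_set_port_pause(mdev, 1, 1); /* enable both directions */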
index c4f3f74908ec220254137ea7893cd373c6c3b507..b4c87c7b0cf01470ac4ac79a418c174c3f0fc820 100644 (file)
@@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
        return err;
 }
 
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+                        int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+       MLX5_SET(modify_tir_in, in, tirn, tirn);
+       MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
 {
        u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -375,6 +387,18 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
        return err;
 }
 
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+                        int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+       MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+       MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
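
	/* Both modify wrappers follow the same convention: the caller
	 * builds the context in 'in', and the wrapper stamps the object
	 * number and opcode before executing the command and checking
	 * its status.
	 */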
+
 void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
 {
        u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
index 10bd75e7d9b1dd3f44fab37bc274bbe219040677..74cae51436e4f0658ff83bd2962c46d82fd49fa7 100644 (file)
@@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
 void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
                         u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+                        int inlen);
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
 int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
                         u32 *tisn);
@@ -63,6 +65,8 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
 int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
                         u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+                        int inlen);
 void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
 
 #endif /* __TRANSOBJ_H__ */
index 8d1080da49b5707ac9fd55bccb500a3a46f39220..2941d9c5ae486250f340915901464b22cc5aef93 100644 (file)
@@ -12,7 +12,7 @@ config MLXSW_CORE
 
 config MLXSW_PCI
        tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
-       depends on PCI && MLXSW_CORE
+       depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
        default m
        ---help---
          This is PCI bus implementation for Mellanox Technologies Switch ASICs.
index ad66ae44a0d7acbe338456c07c6475aa714fab8b..09325b72d52409dbb5528aa139aa44e1a3ddd873 100644 (file)
@@ -865,6 +865,16 @@ static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
        return container_of(driver_priv, struct mlxsw_core, driver_priv);
 }
 
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+                                 const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+       return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+                                                 tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
 int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
                            const struct mlxsw_tx_info *tx_info)
 {
@@ -1063,7 +1073,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
                        mlxsw_core->emad.tid - 1);
                mlxsw_core_buf_dump_dbg(mlxsw_core,
                                        mlxsw_core->emad.resp_skb->data,
-                                       skb->len);
+                                       mlxsw_core->emad.resp_skb->len);
 
                dev_kfree_skb(mlxsw_core->emad.resp_skb);
        }
index 2280b319c3621b1d174258d9ebe99130eef0e04f..165808471188567613efde6488b34eca38ed1522 100644 (file)
@@ -73,6 +73,9 @@ struct mlxsw_tx_info {
        bool is_emad;
 };
 
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+                                 const struct mlxsw_tx_info *tx_info);
+
 int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
                            const struct mlxsw_tx_info *tx_info);
 
@@ -177,6 +180,8 @@ struct mlxsw_bus {
        int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_config_profile *profile);
        void (*fini)(void *bus_priv);
+       bool (*skb_transmit_busy)(void *bus_priv,
+                                 const struct mlxsw_tx_info *tx_info);
        int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
                            const struct mlxsw_tx_info *tx_info);
        int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
index 4d0ac882bec3ff93d6fd0ecfdb12053e481f38b9..ffd55d030ce28dbf902f6990a0dfb7f96b08a766 100644 (file)
@@ -62,7 +62,7 @@ __mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
        if (item->offset % typesize != 0 ||
            item->step % typesize != 0 ||
            item->in_step_offset % typesize != 0) {
-               pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%lx)\n",
+               pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
                       item->name, item->offset, item->step,
                       item->in_step_offset, typesize);
                BUG();
index 298ead5b42ca4b588f78a872f46e1a19ea679379..a34f4742aa00c38ffeb57becec36b9b1b467d285 100644 (file)
@@ -667,6 +667,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
        char *wqe;
        struct sk_buff *skb;
        struct mlxsw_rx_info rx_info;
+       u16 byte_count;
        int err;
 
        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
@@ -686,7 +687,10 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
        rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
        rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
 
-       skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe));
+       byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+       if (mlxsw_pci_cqe_crc_get(cqe))
+               byte_count -= ETH_FCS_LEN;
+       skb_put(skb, byte_count);
        mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
 
 put_new_skb:
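
Since ETH_FCS_LEN is 4, the effect is:

	/* A completion reporting byte_count = 68 with the CRC bit set
	 * yields a 64-byte skb, so the FCS never reaches the stack when
	 * the device leaves it in the receive buffer.
	 */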
@@ -1439,6 +1443,15 @@ mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
        return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
 }
 
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+                                       const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+       return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
 static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
                                  const struct mlxsw_tx_info *tx_info)
 {
@@ -1621,11 +1634,12 @@ err_in_mbox_map:
 }
 
 static const struct mlxsw_bus mlxsw_pci_bus = {
-       .kind           = "pci",
-       .init           = mlxsw_pci_init,
-       .fini           = mlxsw_pci_fini,
-       .skb_transmit   = mlxsw_pci_skb_transmit,
-       .cmd_exec       = mlxsw_pci_cmd_exec,
+       .kind                   = "pci",
+       .init                   = mlxsw_pci_init,
+       .fini                   = mlxsw_pci_fini,
+       .skb_transmit_busy      = mlxsw_pci_skb_transmit_busy,
+       .skb_transmit           = mlxsw_pci_skb_transmit,
+       .cmd_exec               = mlxsw_pci_cmd_exec,
 };
 
 static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
index 887af8459149e34be3a7dda26efc0edc141d4b84..1ef9664b451255bdc7c2d08a1fabffe91256c9aa 100644 (file)
@@ -155,6 +155,12 @@ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
  */
 MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
 
+/* pci_cqe_crc
+ * Length includes CRC. Indicates the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
 /* pci_cqe_e
  * CQE with Error.
  */
index b5a72f8e78b1c059372b84826c77816873339f47..096e1c12175a89481838db432cf9be676d339bb5 100644 (file)
@@ -150,6 +150,64 @@ static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
        mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
 }
 
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+       .id = MLXSW_REG_SSPR_ID,
+       .len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). The master port's number is reported as the source
+ * system port when packets are forwarded to the CPU. Only one master port
+ * is allowed per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+       MLXSW_REG_ZERO(sspr, payload);
+       mlxsw_reg_sspr_m_set(payload, 1);
+       mlxsw_reg_sspr_local_port_set(payload, local_port);
+       mlxsw_reg_sspr_sub_port_set(payload, 0);
+       mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
 /* SPMS - Switch Port MSTP/RSTP State Register
  * -------------------------------------------
  * Configures the spanning tree state of a physical port.
@@ -1216,6 +1274,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
                return "SPAD";
        case MLXSW_REG_SMID_ID:
                return "SMID";
+       case MLXSW_REG_SSPR_ID:
+               return "SSPR";
        case MLXSW_REG_SPMS_ID:
                return "SPMS";
        case MLXSW_REG_SFGC_ID:
index 29b46eef97692804b1a6e1ef142a2d77962668db..3e52ee93438c00188d0efe6482cfbd9f686b8ee9 100644 (file)
@@ -245,6 +245,16 @@ static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
        return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
 }
 
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+       mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
 static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
                                      bool *p_usable)
 {
@@ -290,37 +300,34 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
                .local_port = mlxsw_sx_port->local_port,
                .is_emad = false,
        };
-       struct sk_buff *skb_old = NULL;
+       u64 len;
        int err;
 
+       if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+               return NETDEV_TX_BUSY;
+
        if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-               struct sk_buff *skb_new;
+               struct sk_buff *skb_orig = skb;
 
-               skb_old = skb;
-               skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-               if (!skb_new) {
+               skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+               if (!skb) {
                        this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
-                       dev_kfree_skb_any(skb_old);
+                       dev_kfree_skb_any(skb_orig);
                        return NETDEV_TX_OK;
                }
-               skb = skb_new;
        }
        mlxsw_sx_txhdr_construct(skb, &tx_info);
+       len = skb->len;
+       /* Due to a race we might fail here because of a full queue. In that
+        * unlikely case we simply drop the packet.
+        */
        err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
-       if (err == -EAGAIN) {
-               if (skb_old)
-                       dev_kfree_skb_any(skb);
-               return NETDEV_TX_BUSY;
-       }
-
-       if (skb_old)
-               dev_kfree_skb_any(skb_old);
 
        if (!err) {
                pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
-               pcpu_stats->tx_bytes += skb->len;
+               pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
@@ -1001,6 +1008,13 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
                goto port_not_usable;
        }
 
+       err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_system_port_mapping_set;
+       }
+
        err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
        if (err) {
                dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
@@ -1061,6 +1075,7 @@ err_port_stp_state_set:
 err_port_mtu_set:
 err_port_speed_set:
 err_port_swid_set:
+err_port_system_port_mapping_set:
 port_not_usable:
 err_port_module_check:
 err_dev_addr_get:
@@ -1079,6 +1094,7 @@ static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
        unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
        mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
        free_percpu(mlxsw_sx_port->pcpu_stats);
+       free_netdev(mlxsw_sx_port->dev);
 }
 
 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
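
The free_netdev() added to mlxsw_sx_port_remove() plugs a leak: unregister_netdev() only detaches the device and invokes ndo_stop; the struct net_device allocated at port creation still has to be released explicitly. A kernel-style sketch of the required teardown order (example_port_remove() is illustrative, not the driver's code):

    #include <linux/netdevice.h>

    static void example_port_remove(struct net_device *dev)
    {
            unregister_netdev(dev); /* detach from the stack, calls ndo_stop */
            /* ... free per-port resources that dev still references ... */
            free_netdev(dev);       /* finally release the net_device itself */
    }

The same pairing is added to rocker_remove_ports() further down.
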
index f78909a00f150edfd76065b2b993d9660edaebdd..09d2e16fd6b00bfdd0c20fc10c64abd03c29a935 100644 (file)
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
 
        sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
                tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-       err = dma_mapping_error(adapter->dev,
-               sg_dma_address(&tx_ctl->sg));
-       if (err) {
+       if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+               err = -ENOMEM;
                sg_dma_address(&tx_ctl->sg) = 0;
                goto err;
        }
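
This ks8842 fix follows the DMA API contract: dma_map_single() returns an opaque bus address, not an errno, so failure must be detected with dma_mapping_error() and the caller supplies its own error code (-ENOMEM here). A kernel-style sketch of the idiom, assuming a generic device and buffer:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_map(struct device *dev, void *buf, size_t len,
                           dma_addr_t *out)
    {
            dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, dma))
                    return -ENOMEM; /* the cookie itself carries no errno */

            *out = dma;
            return 0;
    }

The netcp driver below receives the same correction, where the old code wrongly tested the cookie with `!dma_addr`.
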
index 055f3763e577baf1fb4edb4d86f919f24edb95da..06bcc734fe8d8680e4e7eb4be6997cc75fc3fbaf 100644 (file)
@@ -24,9 +24,7 @@
 #include <linux/mii.h>
 #include <linux/timer.h>
 #include <linux/irq.h>
-
 #include <linux/vmalloc.h>
-
 #include <linux/io.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
@@ -39,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID  "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID  "5.3.63"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -926,6 +924,7 @@ struct qlcnic_mac_vlan_list {
 #define QLCNIC_FW_CAPABILITY_SET_DRV_VER       BIT_5
 #define QLCNIC_FW_CAPABILITY_2_BEACON          BIT_7
 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG    BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP  BIT_13
 
 #define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD  BIT_0
 #define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD  BIT_1
@@ -2291,8 +2290,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
 
 #define PCI_DEVICE_ID_QLOGIC_QLE824X           0x8020
 #define PCI_DEVICE_ID_QLOGIC_QLE834X           0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830           0x8830
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X        0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830           0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30                0x8C30
 #define PCI_DEVICE_ID_QLOGIC_QLE844X           0x8040
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X        0x8440
 
@@ -2319,7 +2319,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
                  (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
                  (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
                  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
-                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 
        return status;
 }
@@ -2335,7 +2336,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
        bool status;
 
        status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
-                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 
        return status;
 }
@@ -2351,7 +2353,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
 {
        unsigned short device = adapter->pdev->device;
 
-       return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+       return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+               (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 }
 
 static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
index 840bf36b5e9d0d8ac5ddca14cc5e1afca26191f8..5ab3adf88166c5d2ef18c64824e078b756b41060 100644 (file)
@@ -5,14 +5,15 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
 #include <linux/if_vlan.h>
 #include <linux/ipv6.h>
 #include <linux/ethtool.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
 static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
 static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
 static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
        {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
        {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+       {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
 };
 
 const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -3513,6 +3515,31 @@ out:
        qlcnic_free_mbx_args(&cmd);
 }
 
+#define QLCNIC_83XX_ADD_PORT0          BIT_0
+#define QLCNIC_83XX_ADD_PORT1          BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE  13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_cmd_args cmd;
+       int err;
+
+       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+       if (err)
+               return err;
+
+       cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+       cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+       cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+       err = qlcnic_issue_cmd(adapter, &cmd);
+       if (err)
+               dev_err(&adapter->pdev->dev,
+                       "failed to issue extend iSCSI minidump capability\n");
+
+       return err;
+}
+
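
The new mailbox table entry {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1} declares four request words and one response word, matching qlcnic_83xx_extend_md_capab() above: the command word plus arg[1] (port mask) and arg[2]/arg[3] (extended memory size per port, in MB). A reduced sketch of how such a {command, in, out} table can drive argument sizing (struct and helper names here are illustrative, not the driver's):

    struct mbx_meta { unsigned int cmd, in_words, out_words; };

    static const struct mbx_meta example_tbl[] = {
            { 0x37, 4, 1 }, /* EXTEND_ISCSI_DUMP_CAP: 4 request, 1 response */
    };

    /* An alloc helper would look the command up here and size the
     * req.arg[]/rsp.arg[] arrays accordingly, so arg[1..3] above are
     * the last three of the four request words. */
    static unsigned int example_req_words(unsigned int cmd)
    {
            unsigned int i;

            for (i = 0; i < sizeof(example_tbl) / sizeof(example_tbl[0]); i++)
                    if (example_tbl[i].cmd == cmd)
                            return example_tbl[i].in_words;
            return 0;
    }
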
 int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
 {
        u32 major, minor, sub;
index 69f828eb42cf3762f525ee492d0abe9d5d33d1e7..331ae2c20f40395959fc24dd35bd6fc8868c1c07 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/etherdevice.h>
+
 #include "qlcnic_hw.h"
 
 #define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
 int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
index 753ea8bad953c3a75487e3a544cc4106d4a46f68..bf892160dd5f06971d6623f1107e21fe9ec21cb7 100644 (file)
@@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        size_t size;
        u64 addr;
 
-       temp = kzalloc(fw->size, GFP_KERNEL);
+       temp = vzalloc(fw->size);
        if (!temp) {
                release_firmware(fw);
                fw_info->fw = NULL;
@@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 exit:
        release_firmware(fw);
        fw_info->fw = NULL;
-       kfree(temp);
+       vfree(temp);
 
        return ret;
 }
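
Switching from kzalloc() to vzalloc() matters because fw->size can run to megabytes: kmalloc-family allocations need physically contiguous pages and can fail under fragmentation, while this staging buffer is only touched by the CPU, so vmalloc-backed memory suffices. Allocator and free must stay paired, as in this hedged sketch:

    #include <linux/vmalloc.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static int example_stage_fw(const void *src, size_t fw_size)
    {
            void *buf = vzalloc(fw_size); /* virtually contiguous is enough */

            if (!buf)
                    return -ENOMEM;
            memcpy(buf, src, fw_size);    /* CPU-only copies, no DMA */
            vfree(buf);                   /* never kfree() vzalloc() memory */
            return 0;
    }
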
index 75ee9e4ced51f50ba5017153cd072c2666ffa36f..509b596cf1e8612436af6886dc0e2a6e78acdc1d 100644 (file)
@@ -5,13 +5,13 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
 #include <linux/slab.h>
 #include <net/ip.h>
 #include <linux/bitops.h>
 
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
 #define MASK(n) ((1ULL<<(n))-1)
 #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
 
index cbe2399c30a0d11e9621bed426b9b45318187901..4bb33af8e2b3a956db02847bfebfa6ef2362bb3b 100644 (file)
@@ -109,6 +109,7 @@ enum qlcnic_regs {
 #define QLCNIC_CMD_GET_LED_CONFIG              0x6A
 #define QLCNIC_CMD_83XX_SET_DRV_VER            0x6F
 #define QLCNIC_CMD_ADD_RCV_RINGS               0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP  0x37
 
 #define QLCNIC_INTRPT_INTX                     1
 #define QLCNIC_INTRPT_MSIX                     3
index 7dbab3c20db5811d333f8f817769b44b79d57bbf..8b08b20e8b305fb5b98b6da2b39047a5f1a5978d 100644 (file)
@@ -7,11 +7,6 @@
 
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
 #include <linux/swab.h>
 #include <linux/dma-mapping.h>
 #include <linux/if_vlan.h>
 #include <net/vxlan.h>
 #endif
 
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 static const struct pci_device_id qlcnic_pci_tbl[] = {
        ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
        ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
-       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
        ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
        ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
        ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
        {0,}
@@ -1149,6 +1149,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
        case PCI_DEVICE_ID_QLOGIC_QLE844X:
        case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
        case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
                *bar = QLCNIC_83XX_BAR0_LENGTH;
                break;
        default:
@@ -2491,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                qlcnic_83xx_register_map(ahw);
                break;
        case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
        case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
                qlcnic_sriov_vf_register_map(ahw);
                break;
index 332bb8a3f43060bea2ca65991e485e7d8aec1564..cda9e604a95f68d61227808779a93abf1c400342 100644 (file)
@@ -5,13 +5,13 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <net/ip.h>
+
 #include "qlcnic.h"
 #include "qlcnic_hdr.h"
 #include "qlcnic_83xx_hw.h"
 #include "qlcnic_hw.h"
 
-#include <net/ip.h>
-
 #define QLC_83XX_MINIDUMP_FLASH                0x520000
 #define QLC_83XX_OCM_INDEX                     3
 #define QLC_83XX_PCI_INDEX                     0
@@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
        fw_dump->clr = 1;
        snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
        netdev_info(adapter->netdev,
-                   "Dump data %d bytes captured, template header size %d bytes\n",
-                   fw_dump->size, fw_dump->tmpl_hdr_size);
+                   "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+                   fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+                   fw_dump->tmpl_hdr);
        /* Send a udev event to notify availability of FW dump */
        kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
 
        return 0;
 }
 
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+       /* For special adapters (with 0x8830 device ID), where iSCSI firmware
+        * dump needs to be captured as part of the regular firmware dump
+        * collection process, the firmware exports its capability through
+        * capability registers
+        */
+       return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+               (adapter->ahw->extra_capability[0] &
+                QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
 {
        u32 prev_version, current_version;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
        struct pci_dev *pdev = adapter->pdev;
+       bool extended = false;
 
        prev_version = adapter->fw_version;
        current_version = qlcnic_83xx_get_fw_version(adapter);
 
        if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
                vfree(fw_dump->tmpl_hdr);
+
+               if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+                       extended = !qlcnic_83xx_extend_md_capab(adapter);
+
                if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
                        dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+               /* Once we have a minidump template with extended iSCSI dump
+                * capability, update the minidump capture mask to 0x1f as
+                * per FW requirement
+                */
+               if (extended) {
+                       struct qlcnic_83xx_dump_template_hdr *hdr;
+
+                       hdr = fw_dump->tmpl_hdr;
+                       hdr->drv_cap_mask = 0x1f;
+                       fw_dump->cap_mask = 0x1f;
+                       dev_info(&pdev->dev,
+                                "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+               }
        }
 }
index 4677b2edccca79fa1072e8a67315add790271c93..017d8c2c8285abe53eddedc9495fe8a75758f04d 100644 (file)
@@ -8,10 +8,11 @@
 #ifndef _QLCNIC_83XX_SRIOV_H_
 #define _QLCNIC_83XX_SRIOV_H_
 
-#include "qlcnic.h"
 #include <linux/types.h>
 #include <linux/pci.h>
 
+#include "qlcnic.h"
+
 extern const u32 qlcnic_83xx_reg_tbl[];
 extern const u32 qlcnic_83xx_ext_reg_tbl[];
 
index e6312465fe4584e0dd4f90b8f914c5e0088ffeaa..546cd5f1c85aeba1d2d74f01986995858f443206 100644 (file)
@@ -5,10 +5,11 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <linux/types.h>
+
 #include "qlcnic_sriov.h"
 #include "qlcnic.h"
 #include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
 
 #define QLC_BC_COMMAND 0
 #define QLC_BC_RESPONSE        1
index a29538b86edfcac99010cedbea1fff0a29db799b..afd687e5e77904fa73f103495fc2b5606a200942 100644 (file)
@@ -5,9 +5,10 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <linux/types.h>
+
 #include "qlcnic_sriov.h"
 #include "qlcnic.h"
-#include <linux/types.h>
 
 #define QLCNIC_SRIOV_VF_MAX_MAC 7
 #define QLC_VF_MIN_TX_RATE     100
index 05c28f2c6df702ff5ccb7d8bf026f73a80cafe67..ccbb04503b2766885019d53fc0e14573302829bf 100644 (file)
@@ -7,10 +7,6 @@
 
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
 #include <linux/swab.h>
 #include <linux/dma-mapping.h>
 #include <net/ip.h>
@@ -24,6 +20,9 @@
 #include <linux/hwmon-sysfs.h>
 #endif
 
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
        return -EOPNOTSUPP;
index 3df51faf18ae3ba8ce6bb7f49e6f51e4da1be738..f790f61ea78a2b4f1008da82eca29132ff5bdcc0 100644 (file)
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_46:
        case RTL_GIGA_MAC_VER_47:
        case RTL_GIGA_MAC_VER_48:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_49:
        case RTL_GIGA_MAC_VER_50:
        case RTL_GIGA_MAC_VER_51:
-               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
                break;
        default:
                RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
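
Before this r8169 change, MAC versions 46 through 51 fell through to a single RTL_W32() write; the fix splits the group so versions 49-51 additionally set RX_MULTI_EN. Consecutive case labels with no statements between them share one body, as in this runnable reduction (flag values invented for illustration):

    #include <stdio.h>

    enum mac { VER_46, VER_47, VER_48, VER_49, VER_50, VER_51 };

    static unsigned int rxcfg(enum mac v)
    {
            switch (v) {
            case VER_46:
            case VER_47:
            case VER_48:
                    return 0x1;       /* base flags only */
            case VER_49:
            case VER_50:
            case VER_51:
                    return 0x1 | 0x2; /* base flags + RX_MULTI_EN stand-in */
            }
            return 0;
    }

    int main(void)
    {
            printf("%#x %#x\n", rxcfg(VER_48), rxcfg(VER_51));
            return 0;
    }
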
index 4cd5a71ad45e577d8cc4fb68c6b0c6d3d20e9b28..a7cb74ac47586dfaa3205609e0bf3c0a1225ffd7 100644 (file)
@@ -4264,6 +4264,16 @@ static int rocker_port_change_proto_down(struct net_device *dev,
        return 0;
 }
 
+static void rocker_port_neigh_destroy(struct neighbour *n)
+{
+       struct rocker_port *rocker_port = netdev_priv(n->dev);
+       int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
+       __be32 ip_addr = *(__be32 *)n->primary_key;
+
+       rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+                              flags, ip_addr, n->ha);
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
@@ -4278,6 +4288,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_fdb_dump                   = switchdev_port_fdb_dump,
        .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
        .ndo_change_proto_down          = rocker_port_change_proto_down,
+       .ndo_neigh_destroy              = rocker_port_neigh_destroy,
 };
 
 /********************
@@ -4544,6 +4555,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
                if (found->key.pport != rocker_port->pport)
                        continue;
                fdb->addr = found->key.addr;
+               fdb->ndm_state = NUD_REACHABLE;
                fdb->vid = rocker_port_vlan_to_vid(rocker_port,
                                                   found->key.vlan_id);
                err = obj->cb(rocker_port->dev, obj);
@@ -4926,6 +4938,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
                rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
                                   ROCKER_OP_FLAG_REMOVE);
                unregister_netdev(rocker_port->dev);
+               free_netdev(rocker_port->dev);
        }
        kfree(rocker->ports);
 }
@@ -4988,7 +5001,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
 
        err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
        if (err) {
-               dev_err(&pdev->dev, "install ig port table failed\n");
+               netdev_err(rocker_port->dev, "install ig port table failed\n");
                goto err_port_ig_tbl;
        }
 
@@ -5008,6 +5021,7 @@ err_untagged_vlan:
        rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
                           ROCKER_OP_FLAG_REMOVE);
 err_port_ig_tbl:
+       rocker->ports[port_number] = NULL;
        unregister_netdev(dev);
 err_register_netdev:
        free_netdev(dev);
@@ -5180,7 +5194,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_probe_ports;
        }
 
-       dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+       dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+                (int)sizeof(rocker->hw.id), &rocker->hw.id);
 
        return 0;
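
The kernel's %*phN printk extension prints a buffer as a run of hex bytes with no separators, taking the byte count from the `*` argument; printing &rocker->hw.id this way shows the id bytes in memory order instead of reinterpreting them as a host-endian u64 with %016llx. A userspace approximation for illustration:

    #include <stdio.h>
    #include <stdint.h>

    static void print_phN(const void *p, int len)
    {
            const unsigned char *b = p;
            int i;

            for (i = 0; i < len; i++)
                    printf("%02x", b[i]);
            putchar('\n');
    }

    int main(void)
    {
            uint64_t id = 0x0123456789abcdefULL;

            print_phN(&id, (int)sizeof(id)); /* byte order as stored */
            return 0;
    }
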
 
index 959aeeade0c97b8cbf5ee27024ecda185c6258b2..6eef3251d8333233445122ff6321091b6292155a 100644 (file)
@@ -59,7 +59,9 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_net.h>
+#include <linux/acpi.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 
 #include "smsc911x.h"
 
@@ -2362,59 +2364,48 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
        .tx_writefifo = smsc911x_tx_writefifo_shift,
 };
 
-#ifdef CONFIG_OF
-static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
-                                   struct device_node *np)
+static int smsc911x_probe_config(struct smsc911x_platform_config *config,
+                                struct device *dev)
 {
-       const char *mac;
+       int phy_interface;
        u32 width = 0;
 
-       if (!np)
-               return -ENODEV;
+       phy_interface = device_get_phy_mode(dev);
+       if (phy_interface < 0)
+               return phy_interface;
 
-       config->phy_interface = of_get_phy_mode(np);
+       config->phy_interface = phy_interface;
 
-       mac = of_get_mac_address(np);
-       if (mac)
-               memcpy(config->mac, mac, ETH_ALEN);
+       device_get_mac_address(dev, config->mac, ETH_ALEN);
 
-       of_property_read_u32(np, "reg-shift", &config->shift);
+       device_property_read_u32(dev, "reg-shift", &config->shift);
 
-       of_property_read_u32(np, "reg-io-width", &width);
+       device_property_read_u32(dev, "reg-io-width", &width);
        if (width == 4)
                config->flags |= SMSC911X_USE_32BIT;
        else
                config->flags |= SMSC911X_USE_16BIT;
 
-       if (of_get_property(np, "smsc,irq-active-high", NULL))
+       if (device_property_present(dev, "smsc,irq-active-high"))
                config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
 
-       if (of_get_property(np, "smsc,irq-push-pull", NULL))
+       if (device_property_present(dev, "smsc,irq-push-pull"))
                config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
 
-       if (of_get_property(np, "smsc,force-internal-phy", NULL))
+       if (device_property_present(dev, "smsc,force-internal-phy"))
                config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
 
-       if (of_get_property(np, "smsc,force-external-phy", NULL))
+       if (device_property_present(dev, "smsc,force-external-phy"))
                config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
 
-       if (of_get_property(np, "smsc,save-mac-address", NULL))
+       if (device_property_present(dev, "smsc,save-mac-address"))
                config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
 
        return 0;
 }
-#else
-static inline int smsc911x_probe_config_dt(
-                               struct smsc911x_platform_config *config,
-                               struct device_node *np)
-{
-       return -ENODEV;
-}
-#endif /* CONFIG_OF */
 
 static int smsc911x_drv_probe(struct platform_device *pdev)
 {
-       struct device_node *np = pdev->dev.of_node;
        struct net_device *dev;
        struct smsc911x_data *pdata;
        struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
@@ -2478,7 +2469,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
                goto out_disable_resources;
        }
 
-       retval = smsc911x_probe_config_dt(&pdata->config, np);
+       retval = smsc911x_probe_config(&pdata->config, &pdev->dev);
        if (retval && config) {
                /* copy config parameters across to pdata */
                memcpy(&pdata->config, config, sizeof(pdata->config));
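
The smsc911x conversion above swaps the of_* helpers for the firmware-agnostic device_property_*/device_get_* family, so one probe path serves both DT and the ACPI ID added below, and the CONFIG_OF stub disappears. One detail worth noting: device_get_phy_mode() multiplexes its return value, a negative errno or a PHY_INTERFACE_MODE_* constant. A hedged sketch of that check:

    #include <linux/property.h>

    static int example_phy_mode(struct device *dev, int *mode)
    {
            int ret = device_get_phy_mode(dev); /* DT "phy-mode" or ACPI _DSD */

            if (ret < 0)
                    return ret; /* property missing or unrecognized */

            *mode = ret;        /* otherwise a PHY_INTERFACE_MODE_* value */
            return 0;
    }
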
@@ -2654,6 +2645,12 @@ static const struct of_device_id smsc911x_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
 #endif
 
+static const struct acpi_device_id smsc911x_acpi_match[] = {
+       { "ARMH9118", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
+
 static struct platform_driver smsc911x_driver = {
        .probe = smsc911x_drv_probe,
        .remove = smsc911x_drv_remove,
@@ -2661,6 +2658,7 @@ static struct platform_driver smsc911x_driver = {
                .name   = SMSC_CHIPNAME,
                .pm     = SMSC911X_PM_OPS,
                .of_match_table = of_match_ptr(smsc911x_dt_ids),
+               .acpi_match_table = ACPI_PTR(smsc911x_acpi_match),
        },
 };
 
index 333489f0fd24d80ec5d09584b4cbb489eb1a880d..9d89bdbf029f4e0ba908bfa8f199514bd3eb026c 100644 (file)
@@ -42,7 +42,7 @@
 #define NSS_COMMON_CLK_DIV_MASK                        0x7f
 
 #define NSS_COMMON_CLK_SRC_CTRL                        0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (x)
 /* Mode is coded on 1 bit but is different depending on the MAC ID:
  * MAC0: QSGMII=0 RGMII=1
  * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -308,7 +308,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
 
        /* Configure the clock src according to the mode */
        regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
-       val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+       val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
        switch (gmac->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
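
The ipq806x_gmac fix resolves a double shift: the old NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) already evaluated to (1 << x), yet the caller above shifts the mode by the macro again, producing a shift amount of 1 << id rather than id. Redefining the macro to return the bit position and shifting explicitly at each use site makes both callers correct. A runnable reduction:

    #include <stdio.h>

    #define OLD_OFFSET(x)   (1 << (x))
    #define NEW_OFFSET(x)   (x)

    int main(void)
    {
            int id = 1, mode = 1;

            /* old: shift amount is 1 << 1 == 2, so the wrong bit is set */
            printf("old: %#x\n", mode << OLD_OFFSET(id));
            /* new: shift amount is 1, the intended bit for MAC1 */
            printf("new: %#x\n", mode << NEW_OFFSET(id));
            return 0;
    }
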
index d155bf2573cd0ef00abc133074831cccc7f937b1..8fc90f1c872c6fe5afe2105aae62c54d8283800f 100644 (file)
@@ -365,7 +365,8 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
-       struct napi_struct              napi;
+       struct napi_struct              napi_rx;
+       struct napi_struct              napi_tx;
        struct device                   *dev;
        struct cpsw_platform_data       data;
        struct cpsw_ss_regs __iomem     *regs;
@@ -386,10 +387,12 @@ struct cpsw_priv {
        struct cpsw_ale                 *ale;
        bool                            rx_pause;
        bool                            tx_pause;
+       bool                            quirk_irq;
+       bool                            rx_irq_disabled;
+       bool                            tx_irq_disabled;
        /* snapshot of IRQ numbers */
        u32 irqs_table[4];
        u32 num_irqs;
-       bool irq_enabled;
        struct cpts *cpts;
        u32 emac_port;
 };
@@ -752,13 +755,15 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
 
+       writel(0, &priv->wr_regs->tx_en);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-       cpdma_chan_process(priv->txch, 128);
 
-       priv = cpsw_get_slave_priv(priv, 1);
-       if (priv)
-               cpdma_chan_process(priv->txch, 128);
+       if (priv->quirk_irq) {
+               disable_irq_nosync(priv->irqs_table[1]);
+               priv->tx_irq_disabled = true;
+       }
 
+       napi_schedule(&priv->napi_tx);
        return IRQ_HANDLED;
 }
 
@@ -767,43 +772,49 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
        struct cpsw_priv *priv = dev_id;
 
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+       writel(0, &priv->wr_regs->rx_en);
 
-       cpsw_intr_disable(priv);
-       if (priv->irq_enabled == true) {
+       if (priv->quirk_irq) {
                disable_irq_nosync(priv->irqs_table[0]);
-               priv->irq_enabled = false;
+               priv->rx_irq_disabled = true;
        }
 
-       if (netif_running(priv->ndev)) {
-               napi_schedule(&priv->napi);
-               return IRQ_HANDLED;
-       }
+       napi_schedule(&priv->napi_rx);
+       return IRQ_HANDLED;
+}
 
-       priv = cpsw_get_slave_priv(priv, 1);
-       if (!priv)
-               return IRQ_NONE;
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+       struct cpsw_priv        *priv = napi_to_priv(napi_tx);
+       int                     num_tx;
 
-       if (netif_running(priv->ndev)) {
-               napi_schedule(&priv->napi);
-               return IRQ_HANDLED;
+       num_tx = cpdma_chan_process(priv->txch, budget);
+       if (num_tx < budget) {
+               napi_complete(napi_tx);
+               writel(0xff, &priv->wr_regs->tx_en);
+               if (priv->quirk_irq && priv->tx_irq_disabled) {
+                       priv->tx_irq_disabled = false;
+                       enable_irq(priv->irqs_table[1]);
+               }
        }
-       return IRQ_NONE;
+
+       if (num_tx)
+               cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
+
+       return num_tx;
 }
 
-static int cpsw_poll(struct napi_struct *napi, int budget)
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
 {
-       struct cpsw_priv        *priv = napi_to_priv(napi);
+       struct cpsw_priv        *priv = napi_to_priv(napi_rx);
        int                     num_rx;
 
        num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
-               struct cpsw_priv *prim_cpsw;
-
-               napi_complete(napi);
-               cpsw_intr_enable(priv);
-               prim_cpsw = cpsw_get_slave_priv(priv, 0);
-               if (prim_cpsw->irq_enabled == false) {
-                       prim_cpsw->irq_enabled = true;
+               napi_complete(napi_rx);
+               writel(0xff, &priv->wr_regs->rx_en);
+               if (priv->quirk_irq && priv->rx_irq_disabled) {
+                       priv->rx_irq_disabled = false;
                        enable_irq(priv->irqs_table[0]);
                }
        }
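
This cpsw rework gives RX and TX separate napi_struct instances so each direction masks only its own source (the writel to the rx_en/tx_en registers) and re-enables it from its own poll function once the budget is not exhausted; on SoCs flagged with quirk_irq, where the line cannot be masked at the controller, disable_irq_nosync()/enable_irq() is layered on top. The completion half follows the standard NAPI shape, sketched here with hypothetical helpers standing in for cpdma_chan_process() and the register write:

    #include <linux/netdevice.h>

    static int process_one_chan(int budget);   /* hypothetical */
    static void unmask_hw_irq(void);           /* hypothetical */

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = process_one_chan(budget);

            if (done < budget) {       /* channel drained within budget */
                    napi_complete(napi);
                    unmask_hw_irq();   /* only now reopen the source */
            }
            return done;               /* == budget keeps polling scheduled */
    }
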
@@ -1230,7 +1241,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
 static int cpsw_ndo_open(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
-       struct cpsw_priv *prim_cpsw;
        int i, ret;
        u32 reg;
 
@@ -1260,6 +1270,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
                                  ALE_ALL_PORTS << priv->host_port, 0, 0);
 
        if (!cpsw_common_res_usage_state(priv)) {
+               struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
                /* setup tx dma to fixed prio and zero offset */
                cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
                cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1273,6 +1285,19 @@ static int cpsw_ndo_open(struct net_device *ndev)
                /* Enable internal fifo flow control */
                writel(0x7, &priv->regs->flow_control);
 
+               napi_enable(&priv_sl0->napi_rx);
+               napi_enable(&priv_sl0->napi_tx);
+
+               if (priv_sl0->tx_irq_disabled) {
+                       priv_sl0->tx_irq_disabled = false;
+                       enable_irq(priv->irqs_table[1]);
+               }
+
+               if (priv_sl0->rx_irq_disabled) {
+                       priv_sl0->rx_irq_disabled = false;
+                       enable_irq(priv->irqs_table[0]);
+               }
+
                if (WARN_ON(!priv->data.rx_descs))
                        priv->data.rx_descs = 128;
 
@@ -1311,18 +1336,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
                cpsw_set_coalesce(ndev, &coal);
        }
 
-       napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
 
-       prim_cpsw = cpsw_get_slave_priv(priv, 0);
-       if (prim_cpsw->irq_enabled == false) {
-               if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
-                       prim_cpsw->irq_enabled = true;
-                       enable_irq(prim_cpsw->irqs_table[0]);
-               }
-       }
-
        if (priv->data.dual_emac)
                priv->slaves[priv->emac_port].open_stat = true;
        return 0;
@@ -1341,10 +1357,13 @@ static int cpsw_ndo_stop(struct net_device *ndev)
 
        cpsw_info(priv, ifdown, "shutting down cpsw device\n");
        netif_stop_queue(priv->ndev);
-       napi_disable(&priv->napi);
        netif_carrier_off(priv->ndev);
 
        if (cpsw_common_res_usage_state(priv) <= 1) {
+               struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
+               napi_disable(&priv_sl0->napi_rx);
+               napi_disable(&priv_sl0->napi_tx);
                cpts_unregister(priv->cpts);
                cpsw_intr_disable(priv);
                cpdma_ctlr_stop(priv->dma);
@@ -2127,7 +2146,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 
        ndev->netdev_ops = &cpsw_netdev_ops;
        ndev->ethtool_ops = &cpsw_ethtool_ops;
-       netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
        /* register the network device */
        SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2141,6 +2159,44 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        return ret;
 }
 
+#define CPSW_QUIRK_IRQ         BIT(0)
+
+static struct platform_device_id cpsw_devtype[] = {
+       {
+               /* keep it for existing compatibles */
+               .name = "cpsw",
+               .driver_data = CPSW_QUIRK_IRQ,
+       }, {
+               .name = "am335x-cpsw",
+               .driver_data = CPSW_QUIRK_IRQ,
+       }, {
+               .name = "am4372-cpsw",
+               .driver_data = 0,
+       }, {
+               .name = "dra7-cpsw",
+               .driver_data = 0,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, cpsw_devtype);
+
+enum ti_cpsw_type {
+       CPSW = 0,
+       AM335X_CPSW,
+       AM4372_CPSW,
+       DRA7_CPSW,
+};
+
+static const struct of_device_id cpsw_of_mtable[] = {
+       { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
+       { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
+       { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
+       { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
 static int cpsw_probe(struct platform_device *pdev)
 {
        struct cpsw_platform_data       *data;
@@ -2150,6 +2206,7 @@ static int cpsw_probe(struct platform_device *pdev)
        struct cpsw_ale_params          ale_params;
        void __iomem                    *ss_regs;
        struct resource                 *res, *ss_res;
+       const struct of_device_id       *of_id;
        u32 slave_offset, sliver_offset, slave_size;
        int ret = 0, i;
        int irq;
@@ -2169,7 +2226,6 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
        priv->rx_packet_max = max(rx_packet_max, 128);
        priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
-       priv->irq_enabled = true;
        if (!priv->cpts) {
                dev_err(&pdev->dev, "error allocating cpts\n");
                ret = -ENOMEM;
@@ -2341,6 +2397,13 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_ale_ret;
        }
 
+       of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
+       if (of_id) {
+               pdev->id_entry = of_id->data;
+               if (pdev->id_entry->driver_data)
+                       priv->quirk_irq = true;
+       }
+
        /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
         * MISC IRQs which are always kept disabled with this driver so
         * we will not request them.
@@ -2380,7 +2443,8 @@ static int cpsw_probe(struct platform_device *pdev)
 
        ndev->netdev_ops = &cpsw_netdev_ops;
        ndev->ethtool_ops = &cpsw_ethtool_ops;
-       netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+       netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+       netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
 
        /* register the network device */
        SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2504,12 +2568,6 @@ static int cpsw_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
-static const struct of_device_id cpsw_of_mtable[] = {
-       { .compatible = "ti,cpsw", },
-       { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
-
 static struct platform_driver cpsw_driver = {
        .driver = {
                .name    = "cpsw",
index a8a730641bbb14e723f841b2b3c13454b53a5491..bb1bb72121c0474b8c1898540a28d21264479d6a 100644 (file)
@@ -85,7 +85,6 @@ struct netcp_intf {
        struct list_head        rxhook_list_head;
        unsigned int            rx_queue_id;
        void                    *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
-       u32                     rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
        struct napi_struct      rx_napi;
        struct napi_struct      tx_napi;
 
index 29ae672917b7b3d8c9324b6a77b64193e903b14f..1a5aca55ea9f1764e59624ddf0703c4f1d2ecb5a 100644 (file)
@@ -34,6 +34,7 @@
 #define NETCP_SOP_OFFSET       (NET_IP_ALIGN + NET_SKB_PAD)
 #define NETCP_NAPI_WEIGHT      64
 #define NETCP_TX_TIMEOUT       (5 * HZ)
+#define NETCP_PACKET_SIZE      (ETH_FRAME_LEN + ETH_FCS_LEN)
 #define NETCP_MIN_PACKET_SIZE  ETH_ZLEN
 #define NETCP_MAX_MCAST_ADDR   16
 
@@ -815,30 +816,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
        if (likely(fdq == 0)) {
                unsigned int primary_buf_len;
                /* Allocate a primary receive queue entry */
-               buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+               buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
                primary_buf_len = SKB_DATA_ALIGN(buf_len) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-               if (primary_buf_len <= PAGE_SIZE) {
-                       bufptr = netdev_alloc_frag(primary_buf_len);
-                       pad[1] = primary_buf_len;
-               } else {
-                       bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
-                                        GFP_DMA32 | __GFP_COLD);
-                       pad[1] = 0;
-               }
+               bufptr = netdev_alloc_frag(primary_buf_len);
+               pad[1] = primary_buf_len;
 
                if (unlikely(!bufptr)) {
-                       dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+                       dev_warn_ratelimited(netcp->ndev_dev,
+                                            "Primary RX buffer alloc failed\n");
                        goto fail;
                }
                dma = dma_map_single(netcp->dev, bufptr, buf_len,
                                     DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(netcp->dev, dma)))
+                       goto fail;
+
                pad[0] = (u32)bufptr;
 
        } else {
                /* Allocate a secondary receive queue entry */
-               page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+               page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
                if (unlikely(!page)) {
                        dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
                        goto fail;
@@ -1021,7 +1020,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 
        /* Map the linear buffer */
        dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
-       if (unlikely(!dma_addr)) {
+       if (unlikely(dma_mapping_error(dev, dma_addr))) {
                dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
                return NULL;
        }
@@ -1557,8 +1556,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
        knav_queue_disable_notify(netcp->rx_queue);
 
        /* open Rx FDQs */
-       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
-            netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+            ++i) {
                snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
                netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
                if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1952,14 +1951,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                netcp->rx_queue_depths[0] = 128;
        }
 
-       ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
-                                        netcp->rx_buffer_sizes,
-                                        KNAV_DMA_FDQ_PER_CHAN);
-       if (ret) {
-               dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
-               netcp->rx_buffer_sizes[0] = 1536;
-       }
-
        ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
        if (ret < 0) {
                dev_err(dev, "missing \"rx-pool\" parameter\n");
index 5924d4219e9e953f67ee50cdd4add750b6d42c16..4ca2341d7f067a7cd979defac3fea8c0be1aef87 100644 (file)
 #define NULL           0
 #endif
 
-#ifdef LITTLE_ENDIAN
-#define HWM_REVERSE(x) (x)
-#else
-#define        HWM_REVERSE(x)          ((((x)<<24L)&0xff000000L)       +       \
-                                (((x)<< 8L)&0x00ff0000L)       +       \
-                                (((x)>> 8L)&0x0000ff00L)       +       \
-                                (((x)>>24L)&0x000000ffL))
-#endif
-
 #define C_INDIC                (1L<<25)
 #define A_INDIC                (1L<<26)
 #define        RD_FS_LOCAL     0x80
index 78d49d186e056fc278e59afaad5543f3b9717b84..897e1a3f035bc227fdcf3b181a349dbd49cbd629 100644 (file)
@@ -283,7 +283,6 @@ static void geneve_setup(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &geneve_type);
 
-       dev->tx_queue_len = 0;
        dev->features    |= NETIF_F_LLTX;
        dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features    |= NETIF_F_RXCSUM;
@@ -297,7 +296,7 @@ static void geneve_setup(struct net_device *dev)
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 
        netif_keep_dst(dev);
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 }
 
 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
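
geneve previously forced dev->tx_queue_len = 0 to bypass qdisc queueing; the IFF_NO_QUEUE flag states that intent explicitly and lets the core attach the noqueue discipline, which is why the identical substitution appears below for ipvlan, loopback, and nlmon. In a setup callback it is a one-liner, sketched here:

    #include <linux/netdevice.h>

    static void example_setup(struct net_device *dev)
    {
            /* virtual device: transmit synchronously, never queue */
            dev->priv_flags |= IFF_NO_QUEUE;
    }
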
index 2ffbf13471d09ad4c27d8c70fbb4dd3145befa75..216bfd350169a9da723035876d152e89b17c8432 100644 (file)
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
        dev->type = ARPHRD_AX25;
 
        /* Perform the low-level AX25 initialization. */
-       if ((err = ax_open(ax->dev))) {
+       err = ax_open(ax->dev);
+       if (err)
                goto out_free_netdev;
-       }
 
-       if (register_netdev(dev))
+       err = register_netdev(dev);
+       if (err)
                goto out_free_buffers;
 
        /* after register_netdev() - because else printk smashes the kernel */
index 5ce7020ca53004b602df31a11954f1fb6c7e7e23..5fa98f599b3dd47996d064fd1685aede8a86bf63 100644 (file)
@@ -162,6 +162,7 @@ struct netvsc_device_info {
        bool link_state;        /* 0 - link up, 1 - link down */
        int  ring_size;
        u32  max_num_vrss_chns;
+       u32  num_chn;
 };
 
 enum rndis_device_state {
index 7b36d5fecc1f24b95c87477ebd43a06ec5a4f914..2990024b90f972e3e22c56f21d2cce278da651ab 100644 (file)
@@ -770,6 +770,104 @@ static void netvsc_get_channels(struct net_device *net,
        }
 }
 
+static int netvsc_set_channels(struct net_device *net,
+                              struct ethtool_channels *channels)
+{
+       struct net_device_context *net_device_ctx = netdev_priv(net);
+       struct hv_device *dev = net_device_ctx->device_ctx;
+       struct netvsc_device *nvdev = hv_get_drvdata(dev);
+       struct netvsc_device_info device_info;
+       u32 num_chn;
+       u32 max_chn;
+       int ret = 0;
+       bool recovering = false;
+
+       if (!nvdev || nvdev->destroy)
+               return -ENODEV;
+
+       num_chn = nvdev->num_chn;
+       max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
+       if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
+               pr_info("vRSS unsupported before NVSP Version 5\n");
+               return -EINVAL;
+       }
+
+       /* We do not support rx, tx, or other */
+       if (!channels ||
+           channels->rx_count ||
+           channels->tx_count ||
+           channels->other_count ||
+           (channels->combined_count < 1))
+               return -EINVAL;
+
+       if (channels->combined_count > max_chn) {
+               pr_info("combined channels too high, using %d\n", max_chn);
+               channels->combined_count = max_chn;
+       }
+
+       ret = netvsc_close(net);
+       if (ret)
+               goto out;
+
+ do_set:
+       nvdev->start_remove = true;
+       rndis_filter_device_remove(dev);
+
+       nvdev->num_chn = channels->combined_count;
+
+       net_device_ctx->device_ctx = dev;
+       hv_set_drvdata(dev, net);
+
+       memset(&device_info, 0, sizeof(device_info));
+       device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
+       device_info.ring_size = ring_size;
+       device_info.max_num_vrss_chns = max_num_vrss_chns;
+
+       ret = rndis_filter_device_add(dev, &device_info);
+       if (ret) {
+               if (recovering) {
+                       netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+                       return ret;
+               }
+               goto recover;
+       }
+
+       nvdev = hv_get_drvdata(dev);
+
+       ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
+       if (ret) {
+               if (recovering) {
+                       netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
+                       return ret;
+               }
+               goto recover;
+       }
+
+       ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
+       if (ret) {
+               if (recovering) {
+                       netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
+                       return ret;
+               }
+               goto recover;
+       }
+
+ out:
+       netvsc_open(net);
+
+       return ret;
+
+ recover:
+       /* If the above failed, we attempt to recover through the same
+        * process but with the original number of channels.
+        */
+       netdev_err(net, "could not set channels, recovering\n");
+       recovering = true;
+       channels->combined_count = num_chn;
+       goto do_set;
+}
+
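
netvsc_set_channels() above retries through a `recovering` flag and `goto do_set`: if re-adding the RNDIS device or resizing the queues fails, it repeats the whole sequence once with the original channel count, and a second failure returns instead of looping. The control shape, reduced to runnable C:

    #include <stdio.h>

    static int apply(int n)
    {
            return n > 4 ? -1 : 0; /* pretend more than 4 channels fails */
    }

    static int set_channels(int cur, int want)
    {
            int recovering = 0, n = want;

    do_set:
            if (apply(n)) {
                    if (recovering)
                            return -1; /* give up after one recovery pass */
                    recovering = 1;
                    n = cur;           /* fall back to the old count */
                    goto do_set;
            }
            return n;
    }

    int main(void)
    {
            printf("%d\n", set_channels(2, 8)); /* 8 fails, recovers to 2 */
            return 0;
    }
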
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
        struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -799,7 +897,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 
        ndevctx->device_ctx = hdev;
        hv_set_drvdata(hdev, ndev);
+
+       memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
+       device_info.num_chn = nvdev->num_chn;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        rndis_filter_device_add(hdev, &device_info);
 
@@ -889,6 +990,7 @@ static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_link       = ethtool_op_get_link,
        .get_channels   = netvsc_get_channels,
+       .set_channels   = netvsc_set_channels,
 };
 
 static const struct net_device_ops device_ops = {
@@ -1022,6 +1124,7 @@ static int netvsc_probe(struct hv_device *dev,
        net->needed_headroom = max_needed_headroom;
 
        /* Notify the netvsc driver of the new device */
+       memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        ret = rndis_filter_device_add(dev, &device_info);
index 9b8263db49cc30c8a079cd27d0ba08aa67df34cd..5931a799aa17860de8fd8122636cf41db5761122 100644 (file)
@@ -1125,7 +1125,12 @@ int rndis_filter_device_add(struct hv_device *dev,
         */
        node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
        num_possible_rss_qs = cpumask_weight(node_cpu_mask);
-       net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+       /* We will use the given number of channels if available. */
+       if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
+               net_device->num_chn = device_info->num_chn;
+       else
+               net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
 
        num_rss_qs = net_device->num_chn - 1;
        net_device->num_sc_offered = num_rss_qs;
index d0d5bf6cbb686a357e4e5ed98ad652c0dbf6908a..6422caac8d40d64644aa9c95f32b5661dd37003b 100644 (file)
@@ -97,9 +97,7 @@ struct at86rf230_local {
 
        struct at86rf230_state_change irq;
 
-       bool tx_aret;
        unsigned long cal_timeout;
-       s8 max_frame_retries;
        bool is_tx;
        bool is_tx_from_off;
        u8 tx_retry;
@@ -651,7 +649,7 @@ at86rf230_tx_complete(void *context)
 
        enable_irq(ctx->irq);
 
-       ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
+       ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
 }
 
 static void
@@ -760,17 +758,10 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp)
 {
        if (lp->is_tx) {
                lp->is_tx = 0;
-
-               if (lp->tx_aret)
-                       at86rf230_async_state_change(lp, &lp->irq,
-                                                    STATE_FORCE_TX_ON,
-                                                    at86rf230_tx_trac_status,
-                                                    true);
-               else
-                       at86rf230_async_state_change(lp, &lp->irq,
-                                                    STATE_RX_AACK_ON,
-                                                    at86rf230_tx_complete,
-                                                    true);
+               at86rf230_async_state_change(lp, &lp->irq,
+                                            STATE_FORCE_TX_ON,
+                                            at86rf230_tx_trac_status,
+                                            true);
        } else {
                at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
                                         at86rf230_rx_trac_check, true);
@@ -876,24 +867,16 @@ at86rf230_xmit_start(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
 
-       /* In ARET mode we need to go into STATE_TX_ARET_ON after we
-        * are in STATE_TX_ON. The pfad differs here, so we change
-        * the complete handler.
-        */
-       if (lp->tx_aret) {
-               if (lp->is_tx_from_off) {
-                       lp->is_tx_from_off = false;
-                       at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-                                                    at86rf230_write_frame,
-                                                    false);
-               } else {
-                       at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-                                                    at86rf230_xmit_tx_on,
-                                                    false);
-               }
+       /* check if we are changing from the off state */
+       if (lp->is_tx_from_off) {
+               lp->is_tx_from_off = false;
+               at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+                                            at86rf230_write_frame,
+                                            false);
        } else {
                at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-                                            at86rf230_write_frame, false);
+                                            at86rf230_xmit_tx_on,
+                                            false);
        }
 }
 
@@ -1267,15 +1250,8 @@ static int
 at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
 {
        struct at86rf230_local *lp = hw->priv;
-       int rc = 0;
-
-       lp->tx_aret = retries >= 0;
-       lp->max_frame_retries = retries;
 
-       if (retries >= 0)
-               rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
-
-       return rc;
+       return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
 }
 
 static int
index 613dae559925f947586f8f011aad315d0705866c..c5b54a15fc4cb2b59584a492a5b4a9bca190e687 100644 (file)
@@ -833,6 +833,7 @@ static int cc2520_get_platform_data(struct spi_device *spi,
                if (!spi_pdata)
                        return -ENOENT;
                *pdata = *spi_pdata;
+               priv->fifo_pin = pdata->fifo;
                return 0;
        }
 
index 20b58bdecf7540100edc5522e74804e6a7544d95..a9268db4e349fc2e131be0010ecd67e8bb779939 100644 (file)
@@ -520,12 +520,11 @@ static void ipvlan_link_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
-       dev->priv_flags |= IFF_UNICAST_FLT;
+       dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
        dev->destructor = free_netdev;
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
-       dev->tx_queue_len = 0;
 }
 
 static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
index c76283c2f84a4e7aa21b684770542e9b6202bffd..dc7d970bd1c0baaf1f707df87001da8f59f3d74a 100644 (file)
@@ -165,10 +165,9 @@ static void loopback_setup(struct net_device *dev)
        dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
-       dev->tx_queue_len       = 0;
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
-       dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
        netif_keep_dst(dev);
        dev->hw_features        = NETIF_F_ALL_TSO | NETIF_F_UFO;
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
index 34924dfadd0097608dac20dcf0aced6bb1c9805e..7b7c70e2341eff0391c1e98f21a0e6ca5931a35d 100644 (file)
@@ -130,7 +130,7 @@ static const struct net_device_ops nlmon_ops = {
 static void nlmon_setup(struct net_device *dev)
 {
        dev->type = ARPHRD_NETLINK;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        dev->netdev_ops = &nlmon_ops;
        dev->ethtool_ops = &nlmon_ethtool_ops;
index 3cc316cb7e6be792b06dfc2c520eae9809f1008b..d8757bf9ad755ed6a3114d9d0a9664e59618744a 100644 (file)
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 
        netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
 
+       if (len < 0) {
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_length_errors++;
+               goto enqueue_again;
+       }
+
        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, ndev);
        skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                return;
        }
 
+enqueue_again:
        rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
        if (rc) {
                dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
 
                rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                              ndev->mtu + ETH_HLEN);
-               if (rc == -EINVAL) {
+               if (rc) {
                        dev_kfree_skb(skb);
                        goto err;
                }
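
The ntb_netdev check guards against the transport handing back a negative length on a corrupted receive. That matters because skb_put() takes an unsigned length, so a negative value would silently become a huge one; counting rx_length_errors and re-posting the buffer at enqueue_again keeps the receive ring full instead. A runnable illustration of the signed-to-unsigned trap:

    #include <stdio.h>

    int main(void)
    {
            int len = -1;
            unsigned int ulen = (unsigned int)len; /* what an unsigned API sees */

            printf("%u\n", ulen); /* 4294967295: a wildly bogus buffer size */
            return 0;
    }
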
index 73d347d7cb04c0082b30671cb32986454ccd0dd5..d6111affbcb6cafd282aaae2c76a6f8fc7c704f3 100644 (file)
@@ -44,6 +44,43 @@ static int aquantia_aneg_done(struct phy_device *phydev)
        return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
 }
 
+static int aquantia_config_intr(struct phy_device *phydev)
+{
+       int err;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+               err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
+               if (err < 0)
+                       return err;
+
+               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
+               if (err < 0)
+                       return err;
+
+               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
+       } else {
+               err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
+               if (err < 0)
+                       return err;
+
+               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
+               if (err < 0)
+                       return err;
+
+               err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
+       }
+
+       return err;
+}
+
+static int aquantia_ack_interrupt(struct phy_device *phydev)
+{
+       int reg;
+
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
+       return (reg < 0) ? reg : 0;
+}
+
 static int aquantia_read_status(struct phy_device *phydev)
 {
        int reg;
@@ -85,8 +122,11 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQ1202",
        .features       = PHY_AQUANTIA_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = aquantia_aneg_done,
        .config_aneg    = aquantia_config_aneg,
+       .config_intr    = aquantia_config_intr,
+       .ack_interrupt  = aquantia_ack_interrupt,
        .read_status    = aquantia_read_status,
        .driver         = { .owner = THIS_MODULE,},
 },
@@ -95,8 +135,11 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQ2104",
        .features       = PHY_AQUANTIA_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = aquantia_aneg_done,
        .config_aneg    = aquantia_config_aneg,
+       .config_intr    = aquantia_config_intr,
+       .ack_interrupt  = aquantia_ack_interrupt,
        .read_status    = aquantia_read_status,
        .driver         = { .owner = THIS_MODULE,},
 },
@@ -105,8 +148,11 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQR105",
        .features       = PHY_AQUANTIA_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = aquantia_aneg_done,
        .config_aneg    = aquantia_config_aneg,
+       .config_intr    = aquantia_config_intr,
+       .ack_interrupt  = aquantia_ack_interrupt,
        .read_status    = aquantia_read_status,
        .driver         = { .owner = THIS_MODULE,},
 },
@@ -115,8 +161,11 @@ static struct phy_driver aquantia_driver[] = {
        .phy_id_mask    = 0xfffffff0,
        .name           = "Aquantia AQR405",
        .features       = PHY_AQUANTIA_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
        .aneg_done      = aquantia_aneg_done,
        .config_aneg    = aquantia_config_aneg,
+       .config_intr    = aquantia_config_intr,
+       .ack_interrupt  = aquantia_ack_interrupt,
        .read_status    = aquantia_read_status,
        .driver         = { .owner = THIS_MODULE,},
 },
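
All four Aquantia entries above gain the same interrupt plumbing: config_intr is a fixed sequence of MMD register writes that aborts at the first failure, and ack_interrupt clears the latched status simply by reading the AN vendor register. Below is a self-contained sketch of that stop-on-first-error write sequence; mmd_write() stands in for phy_write_mmd(), the table mirrors the enable path in the diff (7 = MDIO_MMD_AN, 30 = MDIO_MMD_VEND1), and the comments on what each vendor register does are educated guesses, not documented fact:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for phy_write_mmd(); a negative return would abort the sequence. */
static int mmd_write(int devad, int reg, int val)
{
	printf("MMD %2d reg 0x%04x <- 0x%04x\n", devad, reg, val);
	return 0;
}

int main(void)
{
	static const struct { int devad, reg, val; } enable_seq[] = {
		{  7, 0xd401, 0x0001 },	/* AN: vendor interrupt enable?  */
		{ 30, 0xff00, 0x0001 },	/* VEND1: global interrupt enable? */
		{ 30, 0xff01, 0x1001 },	/* VEND1: interrupt routing?     */
	};
	size_t i;

	for (i = 0; i < sizeof(enable_seq) / sizeof(enable_seq[0]); i++) {
		int err = mmd_write(enable_seq[i].devad, enable_seq[i].reg,
				    enable_seq[i].val);
		if (err < 0)
			return 1;	/* stop at the first failing write */
	}
	return 0;
}
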
index 3320a179ee360c6b3e8d90b6355f5cd18c8b4630..e6897b6a8a53190dd0cea06d863fb88f397fd197 100644 (file)
@@ -52,6 +52,7 @@
 #define MII_M1011_PHY_SCR_MDI_X                0x0020
 #define MII_M1011_PHY_SCR_AUTO_CROSS   0x0060
 
+#define MII_M1145_PHY_EXT_ADDR_PAGE    0x16
 #define MII_M1145_PHY_EXT_SR           0x1b
 #define MII_M1145_PHY_EXT_CR           0x14
 #define MII_M1145_RGMII_RX_DELAY       0x0080
@@ -552,6 +553,16 @@ static int m88e1111_config_init(struct phy_device *phydev)
                err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
                if (err < 0)
                        return err;
+
+               /* make sure copper is selected */
+               err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
+               if (err < 0)
+                       return err;
+
+               err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
+                               err & (~0xff));
+               if (err < 0)
+                       return err;
        }
 
        if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
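
The Marvell hunk above is a read-modify-write on the extended address page register: read the current value, clear the low byte that selects the page, and write it back so the copper page is active again. Note the driver reuses err for both the read value and the write status, which works because phy_read() returns either the register value or a negative errno. A standalone sketch of that RMW-clear pattern with the register access stubbed:

#include <stdio.h>

static int regval = 0x1234;	/* pretend current page register contents */

/* Stand-ins for phy_read()/phy_write(); negative would mean an error. */
static int reg_read(void) { return regval; }
static int reg_write(int val) { regval = val; return 0; }

int main(void)
{
	int err;

	err = reg_read();
	if (err < 0)
		return 1;

	/* clear the low byte (page select) but keep the other bits */
	err = reg_write(err & ~0xff);
	if (err < 0)
		return 1;

	printf("page register now 0x%04x\n", regval);
	return 0;
}
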
index 84b1fba58ac3c8efcbbb0bf9311b442ac52614c1..d9728516dac32d935e9b19ffacf2d9431d81d448 100644 (file)
@@ -814,6 +814,7 @@ void phy_state_machine(struct work_struct *work)
        bool needs_aneg = false, do_suspend = false;
        enum phy_state old_state;
        int err = 0;
+       int old_link;
 
        mutex_lock(&phydev->lock);
 
@@ -899,11 +900,18 @@ void phy_state_machine(struct work_struct *work)
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are
-                * polling or ignoring interrupts
+               /* Only register a CHANGE if we are polling or ignoring
+                * interrupts and the link has changed since the last check.
                 */
-               if (!phy_interrupt_is_valid(phydev))
-                       phydev->state = PHY_CHANGELINK;
+               if (!phy_interrupt_is_valid(phydev)) {
+                       old_link = phydev->link;
+                       err = phy_read_status(phydev);
+                       if (err)
+                               break;
+
+                       if (old_link != phydev->link)
+                               phydev->state = PHY_CHANGELINK;
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
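
The PHY_RUNNING change above turns an unconditional transition into an edge detector: when polling, the state machine only moves to PHY_CHANGELINK if the freshly read link bit differs from the previous one, instead of re-running the whole CHANGELINK path every poll interval. A minimal sketch of that edge-detect-on-poll shape, where the hypothetical read_link() stands in for phy_read_status() followed by checking phydev->link:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical poll source: pretend the link comes up on the 3rd poll. */
static bool read_link(void)
{
	static int polls;
	return ++polls >= 3;
}

int main(void)
{
	bool link = false;
	int i;

	for (i = 0; i < 5; i++) {
		bool old_link = link;

		link = read_link();
		if (old_link != link)
			printf("poll %d: link changed -> %s\n",
			       i, link ? "up" : "down");
		/* unchanged polls fall through with no extra work */
	}
	return 0;
}
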
index 45353613b2ed1450f35e41fc13343b5bb6c3f413..43ab691362d48a982a571288f81269473429edae 100644 (file)
@@ -136,6 +136,19 @@ static struct phy_driver realtek_drvs[] = {
                .ack_interrupt  = &rtl821x_ack_interrupt,
                .config_intr    = &rtl8211b_config_intr,
                .driver         = { .owner = THIS_MODULE,},
+       }, {
+               .phy_id         = 0x001cc914,
+               .name           = "RTL8211DN Gigabit Ethernet",
+               .phy_id_mask    = 0x001fffff,
+               .features       = PHY_GBIT_FEATURES,
+               .flags          = PHY_HAS_INTERRUPT,
+               .config_aneg    = genphy_config_aneg,
+               .read_status    = genphy_read_status,
+               .ack_interrupt  = rtl821x_ack_interrupt,
+               .config_intr    = rtl8211e_config_intr,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
+               .driver         = { .owner = THIS_MODULE,},
        }, {
                .phy_id         = 0x001cc915,
                .name           = "RTL8211E Gigabit Ethernet",
@@ -170,6 +183,7 @@ module_phy_driver(realtek_drvs);
 
 static struct mdio_device_id __maybe_unused realtek_tbl[] = {
        { 0x001cc912, 0x001fffff },
+       { 0x001cc914, 0x001fffff },
        { 0x001cc915, 0x001fffff },
        { 0x001cc916, 0x001fffff },
        { }
index c0f6479e19d48e51fd76c3bbce161ffdd7846842..70b08958763a129fff47ad00a1db130c1334f254 100644 (file)
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
 }
 
 /*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode.  If a link partner
- * does not send the pulses within this interval, the PHY will remain powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down.  If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx can rarely fail to set the ENERGYON bit when an Ethernet cable
+ * is plugged in while the PHY is in Energy Detect Power-Down (EDPD) mode,
+ * which makes cable plug-in detection unreliable.
+ * This workaround disables EDPD and waits for a response to link pulses to
+ * detect whether a cable is plugged in.
+ * EDPD is re-enabled at the end of the procedure to save approximately 220 mW
+ * of power while the cable is unplugged.
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
        int err = genphy_read_status(phydev);
+       int i;
 
        if (!phydev->link) {
                /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Sleep 64 ms to allow ~5 link test pulses to be sent */
-               msleep(64);
+               /* Wait max 640 ms to detect energy */
+               for (i = 0; i < 64; i++) {
+                       /* Sleep to allow link test pulses to be sent */
+                       msleep(10);
+                       rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+                       if (rc < 0)
+                               return rc;
+                       if (rc & MII_LAN83C185_ENERGYON)
+                               break;
+               }
 
                /* Re-enable EDPD */
                rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
+       .read_status    = lan87xx_read_status,
        .config_init    = smsc_phy_config_init,
        .soft_reset     = smsc_phy_reset,
 
index 9d15566521a719b525a28a009f1d999c91a00da2..fa8f5046afe90627242f6d2d523178da653c9427 100644 (file)
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
                file->private_data = NULL;
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_dec_and_test(&pf->refcnt)) {
                        switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                mutex_lock(&ppp_mutex);
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_long_read(&file->f_count) < 2) {
                        ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
                /* Create a new ppp unit */
                if (get_user(unit, p))
                        break;
-               ppp = ppp_create_interface(net, unit, &err);
+               ppp = ppp_create_interface(net, unit, file, &err);
                if (!ppp)
                        break;
                file->private_data = &ppp->file;
-               ppp->owner = file;
                err = -EFAULT;
                if (put_user(ppp->file.index, p))
                        break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
        struct ppp_net *pn = net_generic(net, ppp_net_id);
+       struct ppp *ppp;
+       LIST_HEAD(list);
+       int id;
+
+       rtnl_lock();
+       idr_for_each_entry(&pn->units_idr, ppp, id)
+               unregister_netdevice_queue(ppp->dev, &list);
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
 
        idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
        return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+       struct ppp *ppp = netdev_priv(dev);
+       struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+       ppp_lock(ppp);
+       ppp->closing = 1;
+       ppp_unlock(ppp);
+
+       mutex_lock(&pn->all_ppp_mutex);
+       unit_put(&pn->units_idr, ppp->file.index);
+       mutex_unlock(&pn->all_ppp_mutex);
+
+       ppp->owner = NULL;
+
+       ppp->file.dead = 1;
+       wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
+       .ndo_uninit      = ppp_dev_uninit,
        .ndo_start_xmit  = ppp_start_xmit,
        .ndo_do_ioctl    = ppp_net_ioctl,
        .ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp)
 {
        struct ppp *ppp;
        struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ppp->mru = PPP_MRU;
        init_ppp_file(&ppp->file, INTERFACE);
        ppp->file.hdrlen = PPP_HDRLEN - 2;      /* don't count proto bytes */
+       ppp->owner = file;
        for (i = 0; i < NUM_NP; ++i)
                ppp->npmode[i] = NPMODE_PASS;
        INIT_LIST_HEAD(&ppp->channels);
@@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
        init_waitqueue_head(&pf->rwait);
 }
 
-/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-       struct ppp_net *pn;
-
-       pn = ppp_pernet(ppp->ppp_net);
-       mutex_lock(&pn->all_ppp_mutex);
-
-       /* This will call dev_close() for us. */
-       ppp_lock(ppp);
-       if (!ppp->closing) {
-               ppp->closing = 1;
-               ppp_unlock(ppp);
-               unregister_netdev(ppp->dev);
-               unit_put(&pn->units_idr, ppp->file.index);
-       } else
-               ppp_unlock(ppp);
-
-       ppp->file.dead = 1;
-       ppp->owner = NULL;
-       wake_up_interruptible(&ppp->file.rwait);
-
-       mutex_unlock(&pn->all_ppp_mutex);
-}
-
 /*
  * Free the memory used by a ppp unit.  This is only called once
  * there are no channels connected to the unit and no file structs
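
The ppp changes above fold ppp_shutdown_interface() into the new ndo_uninit callback, so every path that unregisters the netdev under rtnl_lock (owner close, ioctl detach, netns teardown) runs the same cleanup, and ppp_exit_net() can queue each unit with unregister_netdevice_queue() and flush them all in one unregister_netdevice_many() call. A toy sketch of that queue-then-flush batching, with hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>

struct unit {
	int id;
	struct unit *next;	/* link used only while queued for teardown */
};

/* Analogue of unregister_netdevice_queue(): defer, don't tear down yet. */
static void teardown_queue(struct unit *u, struct unit **list)
{
	u->next = *list;
	*list = u;
}

/* Analogue of unregister_netdevice_many(): one pass over the whole batch. */
static void teardown_many(struct unit *list)
{
	while (list) {
		struct unit *next = list->next;

		printf("tearing down unit %d\n", list->id);
		free(list);
		list = next;
	}
}

int main(void)
{
	struct unit *pending = NULL;
	int id;

	for (id = 0; id < 3; id++) {
		struct unit *u = malloc(sizeof(*u));

		if (!u)
			return 1;
		u->id = id;
		teardown_queue(u, &pending);	/* per-unit work stays cheap */
	}
	teardown_many(pending);			/* expensive step runs once */
	return 0;
}
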
index daa054b3ff03ebf58f32c8bc7b1b012240895176..651d35ea22c5f8335b50c0b0aa75c834eb4c83c5 100644 (file)
@@ -2051,9 +2051,9 @@ static void team_setup(struct net_device *dev)
        dev->netdev_ops = &team_netdev_ops;
        dev->ethtool_ops = &team_ethtool_ops;
        dev->destructor = team_destructor;
-       dev->tx_queue_len = 0;
        dev->flags |= IFF_MULTICAST;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        /*
         * Indicate we support unicast address filtering. That way core won't
index ec8bd34ce47b5e3cb8d5f1129f564784c649d52b..39364a45af4043880fe3d195bb381fcfb8858e74 100644 (file)
@@ -291,8 +291,6 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
        u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
        int ret;
 
-       BUG_ON(!dev);
-
        if (!buf)
                return -ENOMEM;
 
@@ -319,8 +317,6 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
        u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
        int ret;
 
-       BUG_ON(!dev);
-
        if (!buf)
                return -ENOMEM;
 
@@ -351,10 +347,6 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
        u32 *src;
        u32 *dst;
 
-       BUG_ON(!dev);
-       BUG_ON(!data);
-       BUG_ON(sizeof(struct lan78xx_statstage) != 0xBC);
-
        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return -ENOMEM;
@@ -687,9 +679,6 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
        u32 val;
        int i, ret;
 
-       BUG_ON(!dev);
-       BUG_ON(!data);
-
        ret = lan78xx_eeprom_confirm_not_busy(dev);
        if (ret)
                return ret;
@@ -737,9 +726,6 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
        u32 val;
        int i, ret;
 
-       BUG_ON(!dev);
-       BUG_ON(!data);
-
        ret = lan78xx_eeprom_confirm_not_busy(dev);
        if (ret)
                return ret;
@@ -2220,20 +2206,10 @@ static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
        spin_lock_irqsave(&list->lock, flags);
        old_state = entry->state;
        entry->state = state;
-       if (!list->prev)
-               BUG_ON(!list->prev);
-       if (!list->next)
-               BUG_ON(!list->next);
-       if (!skb->prev || !skb->next)
-               BUG_ON(true);
 
        __skb_unlink(skb, list);
        spin_unlock(&list->lock);
        spin_lock(&dev->done.lock);
-       if (!dev->done.prev)
-               BUG_ON(!dev->done.prev);
-       if (!dev->done.next)
-               BUG_ON(!dev->done.next);
 
        __skb_queue_tail(&dev->done, skb);
        if (skb_queue_len(&dev->done) == 1)
@@ -2279,8 +2255,7 @@ static void tx_complete(struct urb *urb)
 
        usb_autopm_put_interface_async(dev->intf);
 
-       if (skb)
-               defer_bh(dev, skb, &dev->txq, tx_done);
+       defer_bh(dev, skb, &dev->txq, tx_done);
 }
 
 static void lan78xx_queue_skb(struct sk_buff_head *list,
@@ -2295,13 +2270,15 @@ static void lan78xx_queue_skb(struct sk_buff_head *list,
 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
        struct lan78xx_net *dev = netdev_priv(net);
+       struct sk_buff *skb2 = NULL;
 
-       if (skb)
+       if (skb) {
                skb_tx_timestamp(skb);
+               skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+       }
 
-       skb = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
-       if (skb) {
-               skb_queue_tail(&dev->txq_pend, skb);
+       if (skb2) {
+               skb_queue_tail(&dev->txq_pend, skb2);
 
                if (skb_queue_len(&dev->txq_pend) > 10)
                        netif_stop_queue(net);
@@ -2748,8 +2725,6 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                        memcpy(skb->data + pos, skb2->data, skb2->len);
                        pos += roundup(skb2->len, sizeof(u32));
                        dev_kfree_skb(skb2);
-               } else {
-                       BUG_ON(true);
                }
        }
 
@@ -2858,11 +2833,6 @@ static void lan78xx_bh(unsigned long param)
        struct sk_buff *skb;
        struct skb_data *entry;
 
-       if (!dev->done.prev)
-               BUG_ON(!dev->done.prev);
-       if (!dev->done.next)
-               BUG_ON(!dev->done.next);
-
        while ((skb = skb_dequeue(&dev->done))) {
                entry = (struct skb_data *)(skb->cb);
                switch (entry->state) {
@@ -2882,10 +2852,6 @@ static void lan78xx_bh(unsigned long param)
                        netdev_dbg(dev->net, "skb state %d\n", entry->state);
                        return;
                }
-               if (!dev->done.prev)
-                       BUG_ON(!dev->done.prev);
-               if (!dev->done.next)
-                       BUG_ON(!dev->done.next);
        }
 
        if (netif_device_present(dev->net) && netif_running(dev->net)) {
@@ -3156,7 +3122,6 @@ static int lan78xx_probe(struct usb_interface *intf,
 
        return 0;
 
-       usb_set_intfdata(intf, NULL);
 out3:
        lan78xx_unbind(dev, intf);
 out2:
index 1f7a7cd97e50277e48487e18eaeafc9406b27f46..6392ae3c4ab82a5c7314ce219575bd09fe995922 100644 (file)
@@ -786,6 +786,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
index 343592c4315f6397d6a2414457a7d10b532c0eca..0ef4a5ad555739870897bfbb96c7a95847deed20 100644 (file)
@@ -306,6 +306,7 @@ static void veth_setup(struct net_device *dev)
 
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
index 66f08f622dc6603026ceb7646878a6878b637d15..9b950f2db836fd6cbc9165e19c525eedbbefaa70 100644 (file)
@@ -40,12 +40,12 @@ module_param(gso, bool, 0444);
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN  128
 
-/* Weight used for the RX packet size EWMA. The average packet size is used to
- * determine the packet buffer size when refilling RX rings. As the entire RX
- * ring may be refilled at once, the weight is chosen so that the EWMA will be
- * insensitive to short-term, transient changes in packet size.
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
  */
-#define RECEIVE_AVG_WEIGHT 64
+DECLARE_EWMA(pkt_len, 1, 64)
 
 /* Minimum alignment for mergeable packet buffers. */
 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
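
DECLARE_EWMA(pkt_len, 1, 64) generates the ewma_pkt_len_init/add/read helpers used later in this file around an integer exponentially weighted moving average. Assuming the same semantics as the ewma_init(..., 1, RECEIVE_AVG_WEIGHT) call it replaces, each sample moves the average by (sample - avg) / 64, i.e. avg <- (avg * 63 + sample) / 64, kept internally scaled by the factor. A standalone sketch of that update rule (helper names here are hypothetical; the kernel version uses shifts since both parameters are powers of two):

#include <stdio.h>

#define EWMA_FACTOR 1UL		/* first macro argument above  */
#define EWMA_WEIGHT 64UL	/* second macro argument above */

/* avg <- (avg * (weight - 1) + sample * factor) / weight, in integers,
 * with 'internal' holding the average scaled by the factor. */
static unsigned long ewma_add(unsigned long internal, unsigned long sample)
{
	if (!internal)
		return sample * EWMA_FACTOR;	/* first sample seeds it */
	return (internal * (EWMA_WEIGHT - 1) + sample * EWMA_FACTOR)
		/ EWMA_WEIGHT;
}

int main(void)
{
	unsigned long avg = 0;
	unsigned long pkt_lens[] = { 1500, 1500, 64, 1500, 9000 };
	size_t i;

	for (i = 0; i < sizeof(pkt_lens) / sizeof(pkt_lens[0]); i++) {
		avg = ewma_add(avg, pkt_lens[i]);
		printf("sample %lu -> avg %lu\n", pkt_lens[i],
		       avg / EWMA_FACTOR);
	}
	return 0;
}

A single 64-byte or 9000-byte outlier barely moves the average, which is the insensitivity to transient packet sizes the comment above asks for.
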
@@ -85,7 +85,7 @@ struct receive_queue {
        struct page *pages;
 
        /* Average packet length for mergeable receive buffers. */
-       struct ewma mrg_avg_pkt_len;
+       struct ewma_pkt_len mrg_avg_pkt_len;
 
        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;
@@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                }
        }
 
-       ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+       ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
        return head_skb;
 
 err_skb:
@@ -600,12 +600,12 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
        return err;
 }
 
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
 {
        const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        unsigned int len;
 
-       len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+       len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
                        GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
        return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
 }
@@ -1615,7 +1615,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
                napi_hash_add(&vi->rq[i].napi);
 
                sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
-               ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
+               ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
                sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
        }
 
@@ -1658,7 +1658,7 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 {
        struct virtnet_info *vi = netdev_priv(queue->dev);
        unsigned int queue_index = get_netdev_rx_queue_index(queue);
-       struct ewma *avg;
+       struct ewma_pkt_len *avg;
 
        BUG_ON(queue_index >= vi->max_queue_pairs);
        avg = &vi->rq[queue_index].mrg_avg_pkt_len;
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* Do we support "hardware" checksums? */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
-               dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+               dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
                if (csum)
-                       dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+                       dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
new file mode 100644 (file)
index 0000000..b3d9c55
--- /dev/null
@@ -0,0 +1,668 @@
+/*
+ * vrf.c: device driver to encapsulate a VRF space
+ *
+ * Copyright (c) 2015 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * Based on dummy, team and ipvlan drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/hashtable.h>
+
+#include <linux/inetdevice.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/ip6_route.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/vrf.h>
+
+#define DRV_NAME       "vrf"
+#define DRV_VERSION    "1.0"
+
+#define vrf_is_slave(dev)   ((dev)->flags & IFF_SLAVE)
+
+#define vrf_master_get_rcu(dev) \
+       ((struct net_device *)rcu_dereference(dev->rx_handler_data))
+
+struct pcpu_dstats {
+       u64                     tx_pkts;
+       u64                     tx_bytes;
+       u64                     tx_drps;
+       u64                     rx_pkts;
+       u64                     rx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
+static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
+{
+       return dst;
+}
+
+static int vrf_ip_local_out(struct sk_buff *skb)
+{
+       return ip_local_out(skb);
+}
+
+static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
+{
+       /* TO-DO: return max ethernet size? */
+       return dst->dev->mtu;
+}
+
+static void vrf_dst_destroy(struct dst_entry *dst)
+{
+       /* our dst lives forever - or until the device is closed */
+}
+
+static unsigned int vrf_default_advmss(const struct dst_entry *dst)
+{
+       return 65535 - 40;
+}
+
+static struct dst_ops vrf_dst_ops = {
+       .family         = AF_INET,
+       .local_out      = vrf_ip_local_out,
+       .check          = vrf_ip_check,
+       .mtu            = vrf_v4_mtu,
+       .destroy        = vrf_dst_destroy,
+       .default_advmss = vrf_default_advmss,
+};
+
+static bool is_ip_rx_frame(struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
+
+static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+{
+       vrf_dev->stats.tx_errors++;
+       kfree_skb(skb);
+}
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+
+       if (is_ip_rx_frame(skb)) {
+               struct net_device *dev = vrf_master_get_rcu(skb->dev);
+               struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+               u64_stats_update_begin(&dstats->syncp);
+               dstats->rx_pkts++;
+               dstats->rx_bytes += skb->len;
+               u64_stats_update_end(&dstats->syncp);
+
+               skb->dev = dev;
+
+               return RX_HANDLER_ANOTHER;
+       }
+       return RX_HANDLER_PASS;
+}
+
+static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
+                                                struct rtnl_link_stats64 *stats)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_dstats *dstats;
+               u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+               unsigned int start;
+
+               dstats = per_cpu_ptr(dev->dstats, i);
+               do {
+                       start = u64_stats_fetch_begin_irq(&dstats->syncp);
+                       tbytes = dstats->tx_bytes;
+                       tpkts = dstats->tx_pkts;
+                       tdrops = dstats->tx_drps;
+                       rbytes = dstats->rx_bytes;
+                       rpkts = dstats->rx_pkts;
+               } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
+               stats->tx_bytes += tbytes;
+               stats->tx_packets += tpkts;
+               stats->tx_dropped += tdrops;
+               stats->rx_bytes += rbytes;
+               stats->rx_packets += rpkts;
+       }
+       return stats;
+}
+
+static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+                                          struct net_device *dev)
+{
+       vrf_tx_error(dev, skb);
+       return NET_XMIT_DROP;
+}
+
+static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
+                           struct net_device *vrf_dev)
+{
+       struct rtable *rt;
+       int err = 1;
+
+       rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
+       if (IS_ERR(rt))
+               goto out;
+
+       /* TO-DO: what about broadcast? */
+       if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+               ip_rt_put(rt);
+               goto out;
+       }
+
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
+       err = 0;
+out:
+       return err;
+}
+
+static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
+                                          struct net_device *vrf_dev)
+{
+       struct iphdr *ip4h = ip_hdr(skb);
+       int ret = NET_XMIT_DROP;
+       struct flowi4 fl4 = {
+               /* needed to match OIF rule */
+               .flowi4_oif = vrf_dev->ifindex,
+               .flowi4_iif = LOOPBACK_IFINDEX,
+               .flowi4_tos = RT_TOS(ip4h->tos),
+               .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
+               .daddr = ip4h->daddr,
+       };
+
+       if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+               goto err;
+
+       if (!ip4h->saddr) {
+               ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
+                                              RT_SCOPE_LINK);
+       }
+
+       ret = ip_local_out(skb);
+       if (unlikely(net_xmit_eval(ret)))
+               vrf_dev->stats.tx_errors++;
+       else
+               ret = NET_XMIT_SUCCESS;
+
+out:
+       return ret;
+err:
+       vrf_tx_error(vrf_dev, skb);
+       goto out;
+}
+
+static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
+{
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               return vrf_process_v4_outbound(skb, dev);
+       case htons(ETH_P_IPV6):
+               return vrf_process_v6_outbound(skb, dev);
+       default:
+               vrf_tx_error(dev, skb);
+               return NET_XMIT_DROP;
+       }
+}
+
+static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+
+       if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+               struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+               u64_stats_update_begin(&dstats->syncp);
+               dstats->tx_pkts++;
+               dstats->tx_bytes += skb->len;
+               u64_stats_update_end(&dstats->syncp);
+       } else {
+               this_cpu_inc(dev->dstats->tx_drps);
+       }
+
+       return ret;
+}
+
+static netdev_tx_t vrf_finish(struct sock *sk, struct sk_buff *skb)
+{
+       return dev_queue_xmit(skb);
+}
+
+static int vrf_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct net_device *dev = skb_dst(skb)->dev;
+
+       IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+
+       skb->dev = dev;
+       skb->protocol = htons(ETH_P_IP);
+
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
+                           NULL, dev,
+                           vrf_finish,
+                           !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static void vrf_rtable_destroy(struct net_vrf *vrf)
+{
+       struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+
+       dst_destroy(dst);
+       vrf->rth = NULL;
+}
+
+static struct rtable *vrf_rtable_create(struct net_device *dev)
+{
+       struct rtable *rth;
+
+       rth = dst_alloc(&vrf_dst_ops, dev, 2,
+                       DST_OBSOLETE_NONE,
+                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       if (rth) {
+               rth->dst.output = vrf_output;
+               rth->rt_genid   = rt_genid_ipv4(dev_net(dev));
+               rth->rt_flags   = 0;
+               rth->rt_type    = RTN_UNICAST;
+               rth->rt_is_input = 0;
+               rth->rt_iif     = 0;
+               rth->rt_pmtu    = 0;
+               rth->rt_gateway = 0;
+               rth->rt_uses_gateway = 0;
+               INIT_LIST_HEAD(&rth->rt_uncached);
+               rth->rt_uncached_list = NULL;
+       }
+
+       return rth;
+}
+
+/**************************** device handling ********************/
+
+/* cycle interface to flush neighbor cache and move routes across tables */
+static void cycle_netdev(struct net_device *dev)
+{
+       unsigned int flags = dev->flags;
+       int ret;
+
+       if (!netif_running(dev))
+               return;
+
+       ret = dev_change_flags(dev, flags & ~IFF_UP);
+       if (ret >= 0)
+               ret = dev_change_flags(dev, flags);
+
+       if (ret < 0) {
+               netdev_err(dev,
+                          "Failed to cycle device %s; route tables might be wrong!\n",
+                          dev->name);
+       }
+}
+
+static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
+                                         struct net_device *dev)
+{
+       struct list_head *head = &queue->all_slaves;
+       struct slave *slave;
+
+       list_for_each_entry(slave, head, list) {
+               if (slave->dev == dev)
+                       return slave;
+       }
+
+       return NULL;
+}
+
+/* inverse of __vrf_insert_slave */
+static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
+{
+       list_del(&slave->list);
+}
+
+static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
+{
+       list_add(&slave->list, &queue->all_slaves);
+}
+
+static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+       struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
+       struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+       struct net_vrf *vrf = netdev_priv(dev);
+       struct slave_queue *queue = &vrf->queue;
+       int ret = -ENOMEM;
+
+       if (!slave || !vrf_ptr)
+               goto out_fail;
+
+       slave->dev = port_dev;
+       vrf_ptr->ifindex = dev->ifindex;
+       vrf_ptr->tb_id = vrf->tb_id;
+
+       /* register the packet handler for slave ports */
+       ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
+       if (ret) {
+               netdev_err(port_dev,
+                          "Device %s failed to register rx_handler\n",
+                          port_dev->name);
+               goto out_fail;
+       }
+
+       ret = netdev_master_upper_dev_link(port_dev, dev);
+       if (ret < 0)
+               goto out_unregister;
+
+       port_dev->flags |= IFF_SLAVE;
+       __vrf_insert_slave(queue, slave);
+       rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
+       cycle_netdev(port_dev);
+
+       return 0;
+
+out_unregister:
+       netdev_rx_handler_unregister(port_dev);
+out_fail:
+       kfree(vrf_ptr);
+       kfree(slave);
+       return ret;
+}
+
+static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+       if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
+               return -EINVAL;
+
+       return do_vrf_add_slave(dev, port_dev);
+}
+
+/* inverse of do_vrf_add_slave */
+static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+       struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
+       struct net_vrf *vrf = netdev_priv(dev);
+       struct slave_queue *queue = &vrf->queue;
+       struct slave *slave;
+
+       RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);
+
+       netdev_upper_dev_unlink(port_dev, dev);
+       port_dev->flags &= ~IFF_SLAVE;
+
+       netdev_rx_handler_unregister(port_dev);
+
+       /* after netdev_rx_handler_unregister for synchronize_rcu */
+       kfree(vrf_ptr);
+
+       cycle_netdev(port_dev);
+
+       slave = __vrf_find_slave_dev(queue, port_dev);
+       if (slave)
+               __vrf_remove_slave(queue, slave);
+
+       kfree(slave);
+
+       return 0;
+}
+
+static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+       return do_vrf_del_slave(dev, port_dev);
+}
+
+static void vrf_dev_uninit(struct net_device *dev)
+{
+       struct net_vrf *vrf = netdev_priv(dev);
+       struct slave_queue *queue = &vrf->queue;
+       struct list_head *head = &queue->all_slaves;
+       struct slave *slave, *next;
+
+       vrf_rtable_destroy(vrf);
+
+       list_for_each_entry_safe(slave, next, head, list)
+               vrf_del_slave(dev, slave->dev);
+
+       free_percpu(dev->dstats);
+       dev->dstats = NULL;
+}
+
+static int vrf_dev_init(struct net_device *dev)
+{
+       struct net_vrf *vrf = netdev_priv(dev);
+
+       INIT_LIST_HEAD(&vrf->queue.all_slaves);
+
+       dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
+       if (!dev->dstats)
+               goto out_nomem;
+
+       /* create the default dst which points back to us */
+       vrf->rth = vrf_rtable_create(dev);
+       if (!vrf->rth)
+               goto out_stats;
+
+       dev->flags = IFF_MASTER | IFF_NOARP;
+
+       return 0;
+
+out_stats:
+       free_percpu(dev->dstats);
+       dev->dstats = NULL;
+out_nomem:
+       return -ENOMEM;
+}
+
+static const struct net_device_ops vrf_netdev_ops = {
+       .ndo_init               = vrf_dev_init,
+       .ndo_uninit             = vrf_dev_uninit,
+       .ndo_start_xmit         = vrf_xmit,
+       .ndo_get_stats64        = vrf_get_stats64,
+       .ndo_add_slave          = vrf_add_slave,
+       .ndo_del_slave          = vrf_del_slave,
+};
+
+static void vrf_get_drvinfo(struct net_device *dev,
+                           struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops vrf_ethtool_ops = {
+       .get_drvinfo    = vrf_get_drvinfo,
+};
+
+static void vrf_setup(struct net_device *dev)
+{
+       ether_setup(dev);
+
+       /* Initialize the device structure. */
+       dev->netdev_ops = &vrf_netdev_ops;
+       dev->ethtool_ops = &vrf_ethtool_ops;
+       dev->destructor = free_netdev;
+
+       /* Fill in device structure with ethernet-generic values. */
+       eth_hw_addr_random(dev);
+
+       /* don't acquire vrf device's netif_tx_lock when transmitting */
+       dev->features |= NETIF_F_LLTX;
+
+       /* don't allow vrf devices to change network namespaces. */
+       dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+       return 0;
+}
+
+static void vrf_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+
+       RCU_INIT_POINTER(dev->vrf_ptr, NULL);
+       kfree_rcu(vrf_ptr, rcu);
+       unregister_netdevice_queue(dev, head);
+}
+
+static int vrf_newlink(struct net *src_net, struct net_device *dev,
+                      struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_vrf *vrf = netdev_priv(dev);
+       struct net_vrf_dev *vrf_ptr;
+       int err;
+
+       if (!data || !data[IFLA_VRF_TABLE])
+               return -EINVAL;
+
+       vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+
+       dev->priv_flags |= IFF_VRF_MASTER;
+
+       err = -ENOMEM;
+       vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
+       if (!vrf_ptr)
+               goto out_fail;
+
+       vrf_ptr->ifindex = dev->ifindex;
+       vrf_ptr->tb_id = vrf->tb_id;
+
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto out_fail;
+
+       rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
+
+       return 0;
+
+out_fail:
+       kfree(vrf_ptr);
+       free_netdev(dev);
+       return err;
+}
+
+static size_t vrf_nl_getsize(const struct net_device *dev)
+{
+       return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
+}
+
+static int vrf_fillinfo(struct sk_buff *skb,
+                       const struct net_device *dev)
+{
+       struct net_vrf *vrf = netdev_priv(dev);
+
+       return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
+}
+
+static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+       [IFLA_VRF_TABLE] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vrf_link_ops __read_mostly = {
+       .kind           = DRV_NAME,
+       .priv_size      = sizeof(struct net_vrf),
+
+       .get_size       = vrf_nl_getsize,
+       .policy         = vrf_nl_policy,
+       .validate       = vrf_validate,
+       .fill_info      = vrf_fillinfo,
+
+       .newlink        = vrf_newlink,
+       .dellink        = vrf_dellink,
+       .setup          = vrf_setup,
+       .maxtype        = IFLA_VRF_MAX,
+};
+
+static int vrf_device_event(struct notifier_block *unused,
+                           unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       /* only care about unregister events to drop slave references */
+       if (event == NETDEV_UNREGISTER) {
+               struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+               struct net_device *vrf_dev;
+
+               if (!vrf_ptr || netif_is_vrf(dev))
+                       goto out;
+
+               vrf_dev = netdev_master_upper_dev_get(dev);
+               vrf_del_slave(vrf_dev, dev);
+       }
+out:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block vrf_notifier_block __read_mostly = {
+       .notifier_call = vrf_device_event,
+};
+
+static int __init vrf_init_module(void)
+{
+       int rc;
+
+       vrf_dst_ops.kmem_cachep =
+               kmem_cache_create("vrf_ip_dst_cache",
+                                 sizeof(struct rtable), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
+
+       if (!vrf_dst_ops.kmem_cachep)
+               return -ENOMEM;
+
+       register_netdevice_notifier(&vrf_notifier_block);
+
+       rc = rtnl_link_register(&vrf_link_ops);
+       if (rc < 0)
+               goto error;
+
+       return 0;
+
+error:
+       unregister_netdevice_notifier(&vrf_notifier_block);
+       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+       return rc;
+}
+
+static void __exit vrf_cleanup_module(void)
+{
+       rtnl_link_unregister(&vrf_link_ops);
+       unregister_netdevice_notifier(&vrf_notifier_block);
+       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+}
+
+module_init(vrf_init_module);
+module_exit(vrf_cleanup_module);
+MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
+MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_VERSION(DRV_VERSION);
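
The core trick in this new driver is vrf_handle_frame(): a slave port's IP traffic is re-attributed to the VRF master (skb->dev = master) and returned with RX_HANDLER_ANOTHER, so the stack processes the packet again in the master's context, where the oif-based FIB rule matching FLOWI_FLAG_VRFSRC in vrf_process_v4_outbound() can steer lookups to the VRF's routing table. A toy model of that redirection, with all names hypothetical and no kernel API involved:

#include <stdio.h>
#include <stdbool.h>

struct toy_dev {
	const char *name;
	struct toy_dev *master;	/* set when enslaved to a VRF device */
};

enum rx_verdict { RX_PASS, RX_ANOTHER };

/* Toy analogue of vrf_handle_frame(): steal IP frames to the master. */
static enum rx_verdict toy_handle_frame(struct toy_dev **dev, bool is_ip)
{
	if (is_ip && (*dev)->master) {
		*dev = (*dev)->master;	/* skb->dev = vrf master */
		return RX_ANOTHER;	/* re-run RX on the master */
	}
	return RX_PASS;			/* non-IP traffic is untouched */
}

int main(void)
{
	struct toy_dev vrf = { "vrf-blue", NULL };
	struct toy_dev eth = { "eth0", &vrf };
	struct toy_dev *rx = &eth;

	if (toy_handle_frame(&rx, true) == RX_ANOTHER)
		printf("frame re-entered the stack on %s\n", rx->name);
	return 0;
}
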
index e90f7a484e1c741b5b51c04481a3d749bd6ab7bd..61b457b9ec00517037e4833790bea97ac53aa832 100644 (file)
@@ -236,7 +236,7 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
 
        hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
                if (inet_sk(vs->sock->sk)->inet_sport == port &&
-                   inet_sk(vs->sock->sk)->sk.sk_family == family &&
+                   vxlan_get_sk_family(vs) == family &&
                    vs->flags == flags)
                        return vs;
        }
@@ -519,10 +519,10 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                                          u32 data, struct gro_remcsum *grc,
                                          bool nopartial)
 {
-       size_t start, offset, plen;
+       size_t start, offset;
 
        if (skb->remcsum_offload)
-               return NULL;
+               return vh;
 
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
@@ -532,17 +532,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                          offsetof(struct udphdr, check) :
                          offsetof(struct tcphdr, check));
 
-       plen = hdrlen + offset + sizeof(u16);
-
-       /* Pull checksum that will be written */
-       if (skb_gro_header_hard(skb, off + plen)) {
-               vh = skb_gro_header_slow(skb, off + plen, off);
-               if (!vh)
-                       return NULL;
-       }
-
-       skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
-                               start, offset, grc, nopartial);
+       vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+                                    start, offset, grc, nopartial);
 
        skb->remcsum_offload = 1;
 
@@ -573,7 +564,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                        goto out;
        }
 
-       skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
        skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
 
        flags = ntohl(vh->vx_flags);
@@ -588,6 +578,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                        goto out;
        }
 
+       skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
        flush = 0;
 
        for (p = *head; p; p = p->next) {
@@ -625,7 +617,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
        struct net_device *dev;
        struct sock *sk = vs->sock->sk;
        struct net *net = sock_net(sk);
-       sa_family_t sa_family = sk->sk_family;
+       sa_family_t sa_family = vxlan_get_sk_family(vs);
        __be16 port = inet_sk(sk)->inet_sport;
        int err;
 
@@ -650,7 +642,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
        struct net_device *dev;
        struct sock *sk = vs->sock->sk;
        struct net *net = sock_net(sk);
-       sa_family_t sa_family = sk->sk_family;
+       sa_family_t sa_family = vxlan_get_sk_family(vs);
        __be16 port = inet_sk(sk)->inet_sport;
 
        rcu_read_lock();
@@ -931,10 +923,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
                        struct vxlan_rdst *rd;
 
-                       if (idx < cb->args[0])
-                               goto skip;
-
                        list_for_each_entry_rcu(rd, &f->remotes, list) {
+                               if (idx < cb->args[0])
+                                       goto skip;
+
                                err = vxlan_fdb_info(skb, vxlan, f,
                                                     NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
@@ -942,9 +934,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                                     NLM_F_MULTI, rd);
                                if (err < 0)
                                        goto out;
-                       }
 skip:
-                       ++idx;
+                               ++idx;
+                       }
                }
        }
 out:
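
The fdb dump fix above moves both the skip test and ++idx inside the per-remote loop, so the netlink resume cursor (cb->args[0]) counts individual remotes rather than whole fdb entries; previously a dump interrupted mid-entry would skip that entry's remaining remotes on resume. A minimal sketch of that resumable nested iteration, with arrays standing in for the hash table and remote lists:

#include <stdio.h>

/* Two fdb entries with 3 and 2 remotes; the cursor counts remotes. */
static const int remotes_per_entry[] = { 3, 2 };

/* Emit at most 'budget' remotes starting at cursor *pos; update the
 * cursor so a later call resumes exactly where this one stopped. */
static int dump(int *pos, int budget)
{
	int idx = 0, emitted = 0;
	size_t e;

	for (e = 0; e < sizeof(remotes_per_entry) /
			sizeof(remotes_per_entry[0]); e++) {
		int r;

		for (r = 0; r < remotes_per_entry[e]; r++) {
			if (idx++ < *pos)
				continue;	/* already sent last time */
			if (emitted == budget)
				goto out;	/* message is full */
			printf("entry %zu remote %d\n", e, r);
			emitted++;
		}
	}
out:
	*pos += emitted;
	return emitted;
}

int main(void)
{
	int pos = 0;

	while (dump(&pos, 2))	/* two remotes per "netlink message" */
		printf("-- resume at %d --\n", pos);
	return 0;
}
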
@@ -1110,6 +1102,9 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
 {
        size_t start, offset, plen;
 
+       if (skb->remcsum_offload)
+               return vh;
+
        start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
        offset = start + ((data & VXLAN_RCO_UDP) ?
                          offsetof(struct udphdr, check) :
@@ -1141,7 +1136,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
        union vxlan_addr *remote_ip;
 
        /* For flow based devices, map all packets to VNI 0 */
-       if (vs->flags & VXLAN_F_FLOW_BASED)
+       if (vs->flags & VXLAN_F_COLLECT_METADATA)
                vni = 0;
 
        /* Is this VNI defined? */
@@ -1183,7 +1178,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
 
        skb_reset_network_header(skb);
        /* In flow-based mode, GBP is carried in dst_metadata */
-       if (!(vs->flags & VXLAN_F_FLOW_BASED))
+       if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
                skb->mark = md->gbp;
 
        if (oip6)
@@ -1213,7 +1208,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
        stats->rx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);
 
-       netif_rx(skb);
+       gro_cells_receive(&vxlan->gro_cells, skb);
 
        return;
 drop:
@@ -1269,17 +1264,27 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        }
 
        if (vxlan_collect_metadata(vs)) {
-               const struct iphdr *iph = ip_hdr(skb);
-
                tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
                if (!tun_dst)
                        goto drop;
 
                info = &tun_dst->u.tun_info;
-               info->key.ipv4_src = iph->saddr;
-               info->key.ipv4_dst = iph->daddr;
-               info->key.ipv4_tos = iph->tos;
-               info->key.ipv4_ttl = iph->ttl;
+               if (vxlan_get_sk_family(vs) == AF_INET) {
+                       const struct iphdr *iph = ip_hdr(skb);
+
+                       info->key.u.ipv4.src = iph->saddr;
+                       info->key.u.ipv4.dst = iph->daddr;
+                       info->key.tos = iph->tos;
+                       info->key.ttl = iph->ttl;
+               } else {
+                       const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+                       info->key.u.ipv6.src = ip6h->saddr;
+                       info->key.u.ipv6.dst = ip6h->daddr;
+                       info->key.tos = ipv6_get_dsfield(ip6h);
+                       info->key.ttl = ip6h->hop_limit;
+               }
+
                info->key.tp_src = udp_hdr(skb)->source;
                info->key.tp_dst = udp_hdr(skb)->dest;
 
@@ -1894,6 +1899,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct sock *sk = vxlan->vn_sock->sock->sk;
+       unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        struct flowi4 fl4;
@@ -1908,8 +1914,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        int err;
        u32 flags = vxlan->flags;
 
-       /* FIXME: Support IPv6 */
-       info = skb_tunnel_info(skb, AF_INET);
+       info = skb_tunnel_info(skb);
 
        if (rdst) {
                dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
@@ -1924,8 +1929,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
                vni = be64_to_cpu(info->key.tun_id);
-               remote_ip.sin.sin_family = AF_INET;
-               remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+               remote_ip.sa.sa_family = family;
+               if (family == AF_INET)
+                       remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+               else
+                       remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
                dst = &remote_ip;
        }
 
@@ -1951,23 +1959,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
                                     vxlan->cfg.port_max, true);
 
+       if (info) {
+               if (info->key.tun_flags & TUNNEL_CSUM)
+                       flags |= VXLAN_F_UDP_CSUM;
+               else
+                       flags &= ~VXLAN_F_UDP_CSUM;
+
+               ttl = info->key.ttl;
+               tos = info->key.tos;
+
+               if (info->options_len)
+                       md = ip_tunnel_info_opts(info, sizeof(*md));
+       } else {
+               md->gbp = skb->mark;
+       }
+
        if (dst->sa.sa_family == AF_INET) {
-               if (info) {
-                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
-                               df = htons(IP_DF);
-                       if (info->key.tun_flags & TUNNEL_CSUM)
-                               flags |= VXLAN_F_UDP_CSUM;
-                       else
-                               flags &= ~VXLAN_F_UDP_CSUM;
-
-                       ttl = info->key.ipv4_ttl;
-                       tos = info->key.ipv4_tos;
-
-                       if (info->options_len)
-                               md = ip_tunnel_info_opts(info, sizeof(*md));
-               } else {
-                       md->gbp = skb->mark;
-               }
+               if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+                       df = htons(IP_DF);
 
                memset(&fl4, 0, sizeof(fl4));
                fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2025,7 +2034,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        } else {
                struct dst_entry *ndst;
                struct flowi6 fl6;
-               u32 flags;
+               u32 rt6i_flags;
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2050,9 +2059,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                /* Bypass encapsulation if the destination is local */
-               flags = ((struct rt6_info *)ndst)->rt6i_flags;
-               if (flags & RTF_LOCAL &&
-                   !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+               rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+               if (rt6i_flags & RTF_LOCAL &&
+                   !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
                        struct vxlan_dev *dst_vxlan;
 
                        dst_release(ndst);
@@ -2066,12 +2075,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
-               md->gbp = skb->mark;
-
                err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
                                      0, ttl, src_port, dst_port, htonl(vni << 8), md,
                                      !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                     vxlan->flags);
+                                     flags);
 #endif
        }
 
@@ -2104,8 +2111,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vxlan_rdst *rdst, *fdst = NULL;
        struct vxlan_fdb *f;
 
-       /* FIXME: Support IPv6 */
-       info = skb_tunnel_info(skb, AF_INET);
+       info = skb_tunnel_info(skb);
 
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
@@ -2129,7 +2135,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
        }
 
-       if (vxlan->flags & VXLAN_F_FLOW_BASED &&
+       if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
            info && info->mode == IP_TUNNEL_INFO_TX) {
                vxlan_xmit_one(skb, dev, NULL, false);
                return NETDEV_TX_OK;
@@ -2390,7 +2396,7 @@ void vxlan_get_rx_port(struct net_device *dev)
        for (i = 0; i < PORT_HASH_SIZE; ++i) {
                hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
                        port = inet_sk(vs->sock->sk)->inet_sport;
-                       sa_family = vs->sock->sk->sk_family;
+                       sa_family = vxlan_get_sk_family(vs);
                        dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
                                                            port);
                }
@@ -2416,7 +2422,6 @@ static void vxlan_setup(struct net_device *dev)
        dev->destructor = free_netdev;
        SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
-       dev->tx_queue_len = 0;
        dev->features   |= NETIF_F_LLTX;
        dev->features   |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features   |= NETIF_F_RXCSUM;
@@ -2428,7 +2433,7 @@ static void vxlan_setup(struct net_device *dev)
        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
        netif_keep_dst(dev);
-       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 
        INIT_LIST_HEAD(&vxlan->next);
        spin_lock_init(&vxlan->hash_lock);
@@ -2441,6 +2446,8 @@ static void vxlan_setup(struct net_device *dev)
 
        vxlan->dev = dev;
 
+       gro_cells_init(&vxlan->gro_cells, dev);
+
        for (h = 0; h < FDB_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
 }
@@ -2462,7 +2469,6 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_RSC]        = { .type = NLA_U8 },
        [IFLA_VXLAN_L2MISS]     = { .type = NLA_U8 },
        [IFLA_VXLAN_L3MISS]     = { .type = NLA_U8 },
-       [IFLA_VXLAN_FLOWBASED]  = { .type = NLA_U8 },
        [IFLA_VXLAN_COLLECT_METADATA]   = { .type = NLA_U8 },
        [IFLA_VXLAN_PORT]       = { .type = NLA_U16 },
        [IFLA_VXLAN_UDP_CSUM]   = { .type = NLA_U8 },
@@ -2814,10 +2820,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (data[IFLA_VXLAN_LIMIT])
                conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
 
-       if (data[IFLA_VXLAN_FLOWBASED] &&
-           nla_get_u8(data[IFLA_VXLAN_FLOWBASED]))
-               conf.flags |= VXLAN_F_FLOW_BASED;
-
        if (data[IFLA_VXLAN_COLLECT_METADATA] &&
            nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
                conf.flags |= VXLAN_F_COLLECT_METADATA;
@@ -2885,6 +2887,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
                hlist_del_rcu(&vxlan->hlist);
        spin_unlock(&vn->sock_lock);
 
+       gro_cells_destroy(&vxlan->gro_cells);
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
 }
@@ -2903,7 +2906,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_RSC */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_L2MISS */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_L3MISS */
-               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_FLOWBASED */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_COLLECT_METADATA */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
                nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2970,8 +2973,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        !!(vxlan->flags & VXLAN_F_L2MISS)) ||
            nla_put_u8(skb, IFLA_VXLAN_L3MISS,
                        !!(vxlan->flags & VXLAN_F_L3MISS)) ||
-           nla_put_u8(skb, IFLA_VXLAN_FLOWBASED,
-                      !!(vxlan->flags & VXLAN_F_FLOW_BASED)) ||
+           nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
+                      !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
            nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
            nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
            nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
@@ -3093,8 +3096,10 @@ static void __net_exit vxlan_exit_net(struct net *net)
                /* If vxlan->dev is in the same netns, it has already been added
                 * to the list by the previous loop.
                 */
-               if (!net_eq(dev_net(vxlan->dev), net))
+               if (!net_eq(dev_net(vxlan->dev), net)) {
+                       gro_cells_destroy(&vxlan->gro_cells);
                        unregister_netdevice_queue(vxlan->dev, &list);
+               }
        }
 
        unregister_netdevice_many(&list);
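The gro_cells hooks added to vxlan above follow a three-call pattern: init at device setup, receive on the decapsulated RX path, destroy on teardown (from both dellink and netns exit, as the hunks show). A minimal sketch of how a tunnel driver wires this up, assuming the include/net/gro_cells.h API of this era and hypothetical tnl_* names:

	#include <net/gro_cells.h>

	struct tnl_priv {
		struct gro_cells gro_cells;	/* per-CPU GRO queues */
	};

	static void tnl_setup(struct net_device *dev)
	{
		struct tnl_priv *p = netdev_priv(dev);

		gro_cells_init(&p->gro_cells, dev);	/* once per netdev */
	}

	static void tnl_rx(struct net_device *dev, struct sk_buff *skb)
	{
		struct tnl_priv *p = netdev_priv(dev);

		/* hand decapsulated skbs to GRO instead of netif_rx() */
		gro_cells_receive(&p->gro_cells, skb);
	}

	static void tnl_teardown(struct net_device *dev)
	{
		struct tnl_priv *p = netdev_priv(dev);

		gro_cells_destroy(&p->gro_cells);	/* must pair with init */
	}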
index 7193b7304fdd3ed4b69c0125732d4a024d4a4b36..848ea6a399f236b14cc5d9a79dd38038e0331aec 100644 (file)
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
                chan->netdev->base_addr = chan->cosa->datareg;
                chan->netdev->irq = chan->cosa->irq;
                chan->netdev->dma = chan->cosa->dma;
-               if (register_hdlc_device(chan->netdev)) {
+               err = register_hdlc_device(chan->netdev);
+               if (err) {
                        netdev_warn(chan->netdev,
                                    "register_hdlc_device() failed\n");
                        free_netdev(chan->netdev);
index 3ebed1c40abb11c192db0acc36d90bfd2028c646..e92aaf61590109430aa6b9b6cd100b7d35daf90b 100644 (file)
@@ -1096,7 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        }
        dev->netdev_ops = &pvc_ops;
        dev->mtu = HDLC_MAX_MTU;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->ml_priv = pvc;
 
        if (register_netdevice(dev) != 0) {
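Several hunks in this series (vxlan and hdlc_fr above, hostap below) replace the old `dev->tx_queue_len = 0` idiom with the IFF_NO_QUEUE private flag. A minimal sketch of the new convention for queueless virtual devices, using a hypothetical myvirt_setup():

	static void myvirt_setup(struct net_device *dev)
	{
		/* was: dev->tx_queue_len = 0;
		 * IFF_NO_QUEUE tells the core to attach the noqueue
		 * qdisc instead of relying on a zero queue length.
		 */
		dev->priv_flags |= IFF_NO_QUEUE;
	}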
index 2399a39217625e55c9a52551263e3b9058d144c5..b1278f9f24baee8ba4c08a107d1c3d2ca5432c03 100644 (file)
@@ -5,7 +5,6 @@ config ATH5K
        select MAC80211_LEDS
        select LEDS_CLASS
        select NEW_LEDS
-       select AVERAGE
        select ATH5K_AHB if ATH25
        select ATH5K_PCI if !ATH25
        ---help---
index 5c008757662b50e05b63024a2e293bfd1a30d671..38be2702c0e23a485f886315a3d5966c13ba3848 100644 (file)
@@ -223,7 +223,7 @@ static void
 ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
                         bool ofdm_trigger)
 {
-       int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+       int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
                ofdm_trigger ? "OFDM" : "CCK");
@@ -309,7 +309,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
 static void
 ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
 {
-       int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+       int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
 
index e22b0e778927155ed693ad84c177b9a6c8d03715..fa6e89e5c4213dbb76da4a5ef2b780550cd6b954 100644 (file)
@@ -1252,6 +1252,8 @@ struct ath5k_statistics {
 #define ATH5K_TXQ_LEN_MAX      (ATH_TXBUF / 4)         /* bufs per queue */
 #define ATH5K_TXQ_LEN_LOW      (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
 
+DECLARE_EWMA(beacon_rssi, 1024, 8)
+
 /* Driver state associated with an instance of a device */
 struct ath5k_hw {
        struct ath_common       common;
@@ -1432,7 +1434,7 @@ struct ath5k_hw {
        struct ath5k_nfcal_hist ah_nfcal_hist;
 
        /* average beacon RSSI in our BSS (used by ANI) */
-       struct ewma             ah_beacon_rssi_avg;
+       struct ewma_beacon_rssi ah_beacon_rssi_avg;
 
        /* noise floor from last periodic calibration */
        s32                     ah_noise_floor;
index 23552f43d1253dd81de3e4f7f6f400a14bff2bd4..342563a3706f403b445a24028f19b059ccd6285a 100644 (file)
@@ -1430,7 +1430,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
        trace_ath5k_rx(ah, skb);
 
        if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
-               ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+               ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
 
                /* check beacons in IBSS mode */
                if (ah->opmode == NL80211_IFTYPE_ADHOC)
@@ -2936,7 +2936,7 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
        ah->ah_cal_next_short = jiffies +
                msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
 
-       ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+       ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
 
        /* clear survey data and cycle counters */
        memset(&ah->survey, 0, sizeof(ah->survey));
index c70782e8f07bd704b2ff495339eb5ab6b65864bc..654a1e33f8278743fdb6034e1681b2a3e4aa8515 100644 (file)
@@ -722,7 +722,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
                        st->mib_intr);
        len += snprintf(buf + len, sizeof(buf) - len,
                        "beacon RSSI average:\t%d\n",
-                       (int)ewma_read(&ah->ah_beacon_rssi_avg));
+                       (int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
 
 #define CC_PRINT(_struct, _field) \
        _struct._field, \
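The ath5k hunks above migrate from the runtime-parameterized ewma_init()/ewma_add()/ewma_read() API to the DECLARE_EWMA() macro, which bakes the factor and weight into a generated type with its own helpers. A minimal sketch of the pattern, assuming the linux/average.h macro of this era and a hypothetical `sig` average:

	#include <linux/average.h>

	/* factor 1024, weight 8: same parameters as the ath5k declaration */
	DECLARE_EWMA(sig, 1024, 8)

	struct demo_state {
		struct ewma_sig avg;		/* generated type */
	};

	static void demo(struct demo_state *st)
	{
		ewma_sig_init(&st->avg);	/* was ewma_init(&e, 1024, 8) */
		ewma_sig_add(&st->avg, 42);	/* was ewma_add(&e, 42) */
		pr_info("avg=%lu\n", ewma_sig_read(&st->avg));
	}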
index dab1323dfec76a1b09183bb8779e482dd7299c49..172a9ff4aaabc5a27d0a318f9cfe94d05f48c21a 100644 (file)
@@ -794,8 +794,11 @@ void ath9k_htc_ani_work(struct work_struct *work)
                common->ani.longcal_timer = timestamp;
        }
 
-       /* Short calibration applies only while caldone is false */
-       if (!common->ani.caldone) {
+       /*
+        * Short calibration applies only while caldone
+        * is false or -ETIMEDOUT
+        */
+       if (common->ani.caldone <= 0) {
                if ((timestamp - common->ani.shortcal_timer) >=
                    short_cal_interval) {
                        shortcal = true;
@@ -844,7 +847,11 @@ set_timer:
        */
        cal_interval = ATH_LONG_CALINTERVAL;
        cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
-       if (!common->ani.caldone)
+       /*
+        * Short calibration applies only while caldone
+        * is false or -ETIMEDOUT
+        */
+       if (common->ani.caldone <= 0)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
 
        ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
index a31a6804dc34eff8174b06e15d8ee14bb5405888..1dd0339de372901d753fa70f975e4c5b0f492f21 100644 (file)
@@ -3186,6 +3186,7 @@ static struct {
        { AR_SREV_VERSION_9550,         "9550" },
        { AR_SREV_VERSION_9565,         "9565" },
        { AR_SREV_VERSION_9531,         "9531" },
+       { AR_SREV_VERSION_9561,         "9561" },
 };
 
 /* For devices with external radios */
index 25d1cbd34306e03ea4827e09509c345d11b5a1df..b2f0d245bcf3a0e71fb96797c47f71ad0ca736db 100644 (file)
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
                switch (phy->rev) {
                case 6:
                case 5:
-                       if (sprom->fem.ghz5.extpa_gain == 3)
+                       if (sprom->fem.ghz2.extpa_gain == 3)
                                return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
                        /* fall through */
                case 4:
index ffe526070d6f19a99de1a777129a51692e508147..a293275c1b0bcffcc55265a55d091b3e17edf1af 100644 (file)
@@ -469,6 +469,36 @@ brcmf_find_wpsie(const u8 *parse, u32 len)
        return NULL;
 }
 
+static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
+                                    struct brcmf_cfg80211_vif *vif,
+                                    enum nl80211_iftype new_type)
+{
+       int iftype_num[NUM_NL80211_IFTYPES];
+       struct brcmf_cfg80211_vif *pos;
+
+       memset(&iftype_num[0], 0, sizeof(iftype_num));
+       list_for_each_entry(pos, &cfg->vif_list, list)
+               if (pos == vif)
+                       iftype_num[new_type]++;
+               else
+                       iftype_num[pos->wdev.iftype]++;
+
+       return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
+
+static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
+                                 enum nl80211_iftype new_type)
+{
+       int iftype_num[NUM_NL80211_IFTYPES];
+       struct brcmf_cfg80211_vif *pos;
+
+       memset(&iftype_num[0], 0, sizeof(iftype_num));
+       list_for_each_entry(pos, &cfg->vif_list, list)
+               iftype_num[pos->wdev.iftype]++;
+
+       iftype_num[new_type]++;
+       return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
 
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
                                 struct brcmf_wsec_key_le *key_le)
@@ -663,8 +693,14 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
                                                     struct vif_params *params)
 {
        struct wireless_dev *wdev;
+       int err;
 
        brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+       err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type);
+       if (err) {
+               brcmf_err("iface validation failed: err=%d\n", err);
+               return ERR_PTR(err);
+       }
        switch (type) {
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_STATION:
@@ -823,8 +859,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
        s32 ap = 0;
        s32 err = 0;
 
-       brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type);
-
+       brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
+       err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
+       if (err) {
+               brcmf_err("iface validation failed: err=%d\n", err);
+               return err;
+       }
        switch (type) {
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_WDS:
@@ -5695,63 +5735,132 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
        }
 };
 
+/**
+ * brcmf_setup_ifmodes() - determine interface modes and combinations.
+ *
+ * @wiphy: wiphy object.
+ * @ifp: interface object needed for feat module api.
+ *
+ * The interface modes and combinations are determined dynamically here
+ * based on firmware functionality.
+ *
+ * no p2p and no mbss:
+ *
+ *     #STA <= 1, #AP <= 1, channels = 1, 2 total
+ *
+ * no p2p and mbss:
+ *
+ *     #STA <= 1, #AP <= 1, channels = 1, 2 total
+ *     #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, no mchan, and mbss:
+ *
+ *     #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
+ *     #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *     #AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, mchan, and mbss:
+ *
+ *     #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
+ *     #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *     #AP <= 4, matching BI, channels = 1, 4 total
+ */
 static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
        struct ieee80211_iface_combination *combo = NULL;
-       struct ieee80211_iface_limit *limits = NULL;
-       int i = 0, max_iface_cnt;
+       struct ieee80211_iface_limit *c0_limits = NULL;
+       struct ieee80211_iface_limit *p2p_limits = NULL;
+       struct ieee80211_iface_limit *mbss_limits = NULL;
+       bool mbss, p2p;
+       int i, c, n_combos;
 
-       combo = kzalloc(sizeof(*combo), GFP_KERNEL);
+       mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
+       p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+
+       n_combos = 1 + !!p2p + !!mbss;
+       combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
        if (!combo)
                goto err;
 
-       limits = kzalloc(sizeof(*limits) * 4, GFP_KERNEL);
-       if (!limits)
+       c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+       if (!c0_limits)
                goto err;
 
+       if (p2p) {
+               p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
+               if (!p2p_limits)
+                       goto err;
+       }
+
+       if (mbss) {
+               mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+               if (!mbss_limits)
+                       goto err;
+       }
+
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_ADHOC) |
                                 BIT(NL80211_IFTYPE_AP);
 
-       if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
-               combo->num_different_channels = 2;
-       else
-               combo->num_different_channels = 1;
-
-       if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
-               limits[i].max = 1;
-               limits[i++].types = BIT(NL80211_IFTYPE_STATION);
-               limits[i].max = 4;
-               limits[i++].types = BIT(NL80211_IFTYPE_AP);
-               max_iface_cnt = 5;
-       } else {
-               limits[i].max = 2;
-               limits[i++].types = BIT(NL80211_IFTYPE_STATION) |
-                                   BIT(NL80211_IFTYPE_AP);
-               max_iface_cnt = 2;
-       }
-
-       if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P)) {
+       c = 0;
+       i = 0;
+       combo[c].num_different_channels = 1;
+       c0_limits[i].max = 1;
+       c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+       if (p2p) {
+               if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+                       combo[c].num_different_channels = 2;
                wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                          BIT(NL80211_IFTYPE_P2P_GO) |
                                          BIT(NL80211_IFTYPE_P2P_DEVICE);
-               limits[i].max = 1;
-               limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
-                                   BIT(NL80211_IFTYPE_P2P_GO);
-               limits[i].max = 1;
-               limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
-               max_iface_cnt += 2;
-       }
-       combo->max_interfaces = max_iface_cnt;
-       combo->limits = limits;
-       combo->n_limits = i;
-
+               c0_limits[i].max = 1;
+               c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+               c0_limits[i].max = 1;
+               c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                      BIT(NL80211_IFTYPE_P2P_GO);
+       } else {
+               c0_limits[i].max = 1;
+               c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+       }
+       combo[c].max_interfaces = i;
+       combo[c].n_limits = i;
+       combo[c].limits = c0_limits;
+
+       if (p2p) {
+               c++;
+               i = 0;
+               combo[c].num_different_channels = 1;
+               p2p_limits[i].max = 1;
+               p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+               p2p_limits[i].max = 1;
+               p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+               p2p_limits[i].max = 1;
+               p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT);
+               p2p_limits[i].max = 1;
+               p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+               combo[c].max_interfaces = i;
+               combo[c].n_limits = i;
+               combo[c].limits = p2p_limits;
+       }
+
+       if (mbss) {
+               c++;
+               combo[c].beacon_int_infra_match = true;
+               combo[c].num_different_channels = 1;
+               mbss_limits[0].max = 4;
+               mbss_limits[0].types = BIT(NL80211_IFTYPE_AP);
+               combo[c].max_interfaces = 4;
+               combo[c].n_limits = 1;
+               combo[c].limits = mbss_limits;
+       }
+       wiphy->n_iface_combinations = n_combos;
        wiphy->iface_combinations = combo;
-       wiphy->n_iface_combinations = 1;
        return 0;
 
 err:
-       kfree(limits);
+       kfree(c0_limits);
+       kfree(p2p_limits);
+       kfree(mbss_limits);
        kfree(combo);
        return -ENOMEM;
 }
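Both validators added above funnel into cfg80211's generic checker rather than open-coding per-driver limits. A minimal sketch of the call pattern, assuming the cfg80211_check_combinations() prototype of this era (wiphy, channel count, radar_detect bitmap, per-iftype counts):

	static int demo_validate(struct wiphy *wiphy, enum nl80211_iftype new_type)
	{
		int iftype_num[NUM_NL80211_IFTYPES] = { 0 };

		/* count existing vifs here, then account for the new one */
		iftype_num[new_type]++;

		/* one channel in use, no radar detection */
		return cfg80211_check_combinations(wiphy, 1, 0, iftype_num);
	}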
@@ -5786,7 +5895,9 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
        struct brcmf_pub *drvr = ifp->drvr;
+       const struct ieee80211_iface_combination *combo;
        struct ieee80211_supported_band *band;
+       u16 max_interfaces = 0;
        __le32 bandlist[3];
        u32 n_bands;
        int err, i;
@@ -5799,8 +5910,13 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
        if (err)
                return err;
 
-       for (i = 0; i < wiphy->iface_combinations->max_interfaces &&
-            i < ARRAY_SIZE(drvr->addresses); i++) {
+       for (i = 0, combo = wiphy->iface_combinations;
+            i < wiphy->n_iface_combinations; i++, combo++) {
+               max_interfaces = max(max_interfaces, combo->max_interfaces);
+       }
+
+       for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses);
+            i++) {
                u8 *addr = drvr->addresses[i].addr;
 
                memcpy(addr, drvr->mac, ETH_ALEN);
@@ -6073,11 +6189,15 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
 
 static void brcmf_free_wiphy(struct wiphy *wiphy)
 {
+       int i;
+
        if (!wiphy)
                return;
 
-       if (wiphy->iface_combinations)
-               kfree(wiphy->iface_combinations->limits);
+       if (wiphy->iface_combinations) {
+               for (i = 0; i < wiphy->n_iface_combinations; i++)
+                       kfree(wiphy->iface_combinations[i].limits);
+       }
        kfree(wiphy->iface_combinations);
        if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
                kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
index 743f16b6a07296e8e5b695598002d78669222870..971920f77b68eb1bcc02043d7a19f14c5a9e68b2 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/bcm47xx_nvram.h>
 
 #include "debug.h"
 #include "firmware.h"
@@ -426,18 +427,32 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
        struct brcmf_fw *fwctx = ctx;
        u32 nvram_length = 0;
        void *nvram = NULL;
+       u8 *data = NULL;
+       size_t data_len;
+       bool raw_nvram;
 
        brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-       if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
-               goto fail;
+       if (fw && fw->data) {
+               data = (u8 *)fw->data;
+               data_len = fw->size;
+               raw_nvram = false;
+       } else {
+               data = bcm47xx_nvram_get_contents(&data_len);
+               if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+                       goto fail;
+               raw_nvram = true;
+       }
 
-       if (fw) {
-               nvram = brcmf_fw_nvram_strip(fw->data, fw->size, &nvram_length,
+       if (data)
+               nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
                                             fwctx->domain_nr, fwctx->bus_nr);
+
+       if (raw_nvram)
+               bcm47xx_nvram_release_contents(data);
+       if (fw)
                release_firmware(fw);
-               if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
-                       goto fail;
-       }
+       if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+               goto fail;
 
        fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
        kfree(fwctx);
@@ -473,15 +488,9 @@ static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
        if (!ret)
                return;
 
-       /* when nvram is optional call .done() callback here */
-       if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
-               fwctx->done(fwctx->dev, fw, NULL, 0);
-               kfree(fwctx);
-               return;
-       }
+       brcmf_fw_request_nvram_done(NULL, fwctx);
+       return;
 
-       /* failed nvram request */
-       release_firmware(fw);
 fail:
        brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
        device_release_driver(fwctx->dev);
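The reworked completion above makes platform NVRAM a fallback when no nvram file is provided: the strip-and-done path is now shared, with only the source of the raw bytes differing. A minimal sketch of the platform side, assuming the linux/bcm47xx_nvram.h getters used in the hunk:

	#include <linux/bcm47xx_nvram.h>

	static void demo_nvram(void)
	{
		size_t len;
		char *data = bcm47xx_nvram_get_contents(&len);

		if (!data)
			return;		/* no platform NVRAM on this board */

		/* ... strip/parse the raw contents here ... */

		bcm47xx_nvram_release_contents(data);
	}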
index 59440631fec50637fc3f35e162defb230d1f420c..8d1ab4ab5be809ca8fab1a4e37a31f1af8538391 100644 (file)
@@ -194,11 +194,15 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
        spin_lock_irqsave(&flow->block_lock, flags);
 
        ring = flow->rings[flowid];
+       if (ring->blocked == blocked) {
+               spin_unlock_irqrestore(&flow->block_lock, flags);
+               return;
+       }
        ifidx = brcmf_flowring_ifidx_get(flow, flowid);
 
        currently_blocked = false;
        for (i = 0; i < flow->nrofrings; i++) {
-               if (flow->rings[i]) {
+               if ((flow->rings[i]) && (i != flowid)) {
                        ring = flow->rings[i];
                        if ((ring->status == RING_OPEN) &&
                            (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
@@ -209,8 +213,8 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
                        }
                }
        }
-       ring->blocked = blocked;
-       if (currently_blocked == blocked) {
+       flow->rings[flowid]->blocked = blocked;
+       if (currently_blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }
index cbf033f59109db924e824c18983cd511d1aab949..1326898d608ee8e96067d3c9c3e118bdbcab0314 100644 (file)
@@ -85,7 +85,6 @@ struct brcmf_event;
        BRCMF_ENUM_DEF(IF, 54) \
        BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
        BRCMF_ENUM_DEF(RSSI, 56) \
-       BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
        BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
        BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
        BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
@@ -103,8 +102,7 @@ struct brcmf_event;
        BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
        BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
        BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
-       BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
-       BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
+       BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
 
 #define BRCMF_ENUM_DEF(id, val) \
        BRCMF_E_##id = (val),
@@ -112,7 +110,11 @@ struct brcmf_event;
 /* firmware event codes sent by the dongle */
 enum brcmf_fweh_event_code {
        BRCMF_FWEH_EVENT_ENUM_DEFLIST
-       BRCMF_E_LAST
+       /* this determines the event mask length, which must match
+        * the minimum length check in the device firmware, so it
+        * is hard-coded here.
+        */
+       BRCMF_E_LAST = 139
 };
 #undef BRCMF_ENUM_DEF
 
index 898c3801e65874647694d281614e146379da8822..7b2136c9badb5ce984d776bb11aa317da6083f9e 100644 (file)
@@ -1360,6 +1360,60 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
        }
 }
 
+#ifdef DEBUG
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+       struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+       struct brcmf_pub *drvr = bus_if->drvr;
+       struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+       struct brcmf_commonring *commonring;
+       u16 i;
+       struct brcmf_flowring_ring *ring;
+       struct brcmf_flowring_hash *hash;
+
+       commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+       seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
+                  commonring->r_ptr, commonring->w_ptr, commonring->depth);
+       commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+       seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
+                  commonring->r_ptr, commonring->w_ptr, commonring->depth);
+       commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+       seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
+                  commonring->r_ptr, commonring->w_ptr, commonring->depth);
+       commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+       seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+                  commonring->r_ptr, commonring->w_ptr, commonring->depth);
+       commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+       seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+                  commonring->r_ptr, commonring->w_ptr, commonring->depth);
+
+       seq_printf(seq, "\nh2d_flowrings: depth %u\n",
+                  BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+       seq_puts(seq, "Active flowrings:\n");
+       hash = msgbuf->flow->hash;
+       for (i = 0; i < msgbuf->flow->nrofrings; i++) {
+               if (!msgbuf->flow->rings[i])
+                       continue;
+               ring = msgbuf->flow->rings[i];
+               if (ring->status != RING_OPEN)
+                       continue;
+               commonring = msgbuf->flowrings[i];
+               hash = &msgbuf->flow->hash[ring->hash_id];
+               seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
+                               "        ifidx %u, fifo %u, da %pM\n",
+                               i, commonring->r_ptr, commonring->w_ptr,
+                               skb_queue_len(&ring->skblist), ring->blocked,
+                               hash->ifidx, hash->fifo, hash->mac);
+       }
+
+       return 0;
+}
+#else
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+       return 0;
+}
+#endif
 
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 {
@@ -1460,6 +1514,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
        spin_lock_init(&msgbuf->flowring_work_lock);
        INIT_LIST_HEAD(&msgbuf->work_queue);
 
+       brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
+
        return 0;
 
 fail:
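The stats dump above uses the seq_file single-show style, with an empty stub when DEBUG is off so the brcmf_debugfs_add_entry() call site needs no #ifdef of its own. A minimal sketch of that convention with a hypothetical demo_stats_read():

	#include <linux/seq_file.h>

	#ifdef DEBUG
	static int demo_stats_read(struct seq_file *seq, void *data)
	{
		seq_puts(seq, "real stats here\n");
		return 0;
	}
	#else
	/* same symbol, empty body: registration code stays #ifdef-free */
	static int demo_stats_read(struct seq_file *seq, void *data)
	{
		return 0;
	}
	#endif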
index 01de1a3bf94ef0965d03dfd9c6a23d5fd2f744b6..80d4228ba7543ee024bd95b72956595772509d27 100644 (file)
@@ -865,7 +865,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
 
        switch(type) {
        case HOSTAP_INTERFACE_AP:
-               dev->tx_queue_len = 0;  /* use main radio device queue */
+               dev->priv_flags |= IFF_NO_QUEUE;        /* use main radio device queue */
                dev->netdev_ops = &hostap_mgmt_netdev_ops;
                dev->type = ARPHRD_IEEE80211;
                dev->header_ops = &hostap_80211_ops;
@@ -874,7 +874,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
                dev->netdev_ops = &hostap_master_ops;
                break;
        default:
-               dev->tx_queue_len = 0;  /* use main radio device queue */
+               dev->priv_flags |= IFF_NO_QUEUE;        /* use main radio device queue */
                dev->netdev_ops = &hostap_netdev_ops;
        }
 
index 101ef310929220a16ae3222246eee3de593185cb..991def878881d6bab84057d6a450baa07b524c52 100644 (file)
@@ -122,7 +122,7 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-void iwl_rx_dispatch(struct iwl_op_mode *op_mode,
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
                     struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
@@ -473,7 +473,7 @@ do {                                                                        \
 } while (0)
 #endif                         /* CONFIG_IWLWIFI_DEBUG */
 
-extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1];
 
 static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
 {
index 0ffb6ff1a255f8ac609ba3799c02489ab853eea1..b15e44f8d1bd7858939cd4379374dfd2aa37cd9c 100644 (file)
@@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
        pos += scnprintf(buf + pos, buf_size - pos,
                         "NVM version: 0x%x\n", nvm_ver);
        for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-               hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-                                  buf_size - pos, 0);
-               pos += strlen(buf + pos);
-               if (buf_size - pos > 0)
-                       buf[pos++] = '\n';
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+                                ofs, ptr + ofs);
        }
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
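The hunk above collapses an open-coded hex_dump_to_buffer() loop into the printk %*ph extension, which prints a small buffer (up to 64 bytes) as space-separated hex. A minimal sketch:

	u8 buf[16] = { 0xde, 0xad, 0xbe, 0xef };

	/* "%16ph" emits 16 bytes as "de ad be ef 00 ..." */
	pr_info("0x%.4x %16ph\n", 0, buf);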
index 1f51757f4a93f43ae0d9db9da1daaace43b0a933..0ba3e56d6015b16feb34acc71c36a81d42453eb6 100644 (file)
@@ -669,6 +669,8 @@ struct iwl_priv {
        /* ieee device used by generic ieee processing code */
        struct ieee80211_hw *hw;
 
+       struct napi_struct *napi;
+
        struct list_head calib_results;
 
        struct workqueue_struct *workqueue;
index 234e30f498b2dde18f3f355d5cfdcf13db6dc185..e7616f0ee6e88061589d84fb98357ea72c79eae1 100644 (file)
@@ -2029,17 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
        return false;
 }
 
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
-                        struct napi_struct *napi,
-                        struct net_device *napi_dev,
-                        int (*poll)(struct napi_struct *, int),
-                        int weight)
-{
-       struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-       ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .start = iwl_op_mode_dvm_start,
        .stop = iwl_op_mode_dvm_stop,
@@ -2052,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .cmd_queue_full = iwl_cmd_queue_full,
        .nic_config = iwl_nic_config,
        .wimax_active = iwl_wimax_active,
-       .napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************
index c91374fe9a5859e251f2252aa2f9965afb1cf4a7..4a45b0b594c75eaa090ebe10d8f8236107a84928 100644 (file)
@@ -39,7 +39,7 @@
 
 #define IWL_CMD_ENTRY(x) [x] = #x
 
-const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = {
        IWL_CMD_ENTRY(REPLY_ALIVE),
        IWL_CMD_ENTRY(REPLY_ERROR),
        IWL_CMD_ENTRY(REPLY_ECHO),
@@ -763,7 +763,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-       ieee80211_rx(priv->hw, skb);
+       ieee80211_rx_napi(priv->hw, skb, priv->napi);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -1073,7 +1073,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
                iwlagn_bt_rx_handler_setup(priv);
 }
 
-void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                    struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
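With the NAPI context now threaded through the op_mode rx path, DVM hands frames to mac80211 via the NAPI-aware entry point instead of plain ieee80211_rx(). A minimal sketch, assuming the ieee80211_rx_napi() prototype introduced by this series:

	static void demo_rx(struct ieee80211_hw *hw, struct napi_struct *napi,
			    struct sk_buff *skb)
	{
		/* like ieee80211_rx(), but allows GRO via the napi context */
		ieee80211_rx_napi(hw, skb, napi);
	}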
index fa35da4edda26031e4aa9ea0d74292cabe479c26..6951aba620eb74a13f6fc2c2da36a043272d8a43 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  15
+#define IWL7260_UCODE_API_MAX  17
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
index 7caea69570d47a2485dc9cb607d408507904ed54..197abe43ddc532eb418f8271fafaff0f7e0218b8 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  15
+#define IWL8000_UCODE_API_MAX  17
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   12
@@ -97,8 +97,9 @@
 #define DEFAULT_NVM_FILE_FAMILY_8000B          "nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000C          "nvmData-8000C"
 
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO      28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO      21
+#define MAX_TX_AGG_SIZE_8260_SDIO      40
 
 /* Max A-MPDU exponent for HT and VHT */
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO        IEEE80211_HT_MAX_AMPDU_32K
@@ -204,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+       .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
        .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -217,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+       .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
        .bt_shared_single_ant = true,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
index fa716618735e05b03fdde50edd0205d4877b8020..543abeaffcf0017cd1ede2d6fb0ba3d03b3b392b 100644 (file)
 #define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
 #define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
 #define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING       (1 << 24) /* SDIO PAGING */
 #define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
 #define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
 #define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
                                 CSR_INT_BIT_HW_ERR  | \
                                 CSR_INT_BIT_FH_TX   | \
                                 CSR_INT_BIT_SW_ERR  | \
+                                CSR_INT_BIT_PAGING  | \
                                 CSR_INT_BIT_RF_KILL | \
                                 CSR_INT_BIT_SW_RX   | \
                                 CSR_INT_BIT_WAKEUP  | \
index 04e6649340b8c2b34d44f196aa20c531cbb83197..71a78cede9b079d61a6d557b0fa362a5d3fe7bc0 100644 (file)
@@ -35,8 +35,8 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
        TP_PROTO(const struct device *dev,
                 struct sk_buff *skb,
-                void *data, size_t data_len),
-       TP_ARGS(dev, skb, data, data_len),
+                u8 hdr_len, size_t data_len),
+       TP_ARGS(dev, skb, hdr_len, data_len),
        TP_STRUCT__entry(
                DEV_ENTRY
 
@@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data,
        TP_fast_assign(
                DEV_ASSIGN;
                if (iwl_trace_data(skb))
-                       memcpy(__get_dynamic_array(data), data, data_len);
+                       skb_copy_bits(skb, hdr_len,
+                                     __get_dynamic_array(data), data_len);
        ),
        TP_printk("[%s] TX frame data", __get_str(dev))
 );
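The tracepoint now copies payload straight out of the skb with skb_copy_bits(), which walks linear data and page fragments alike, instead of a flat-pointer copy that is only valid for linear skbs. A minimal sketch:

	static void demo_copy(struct sk_buff *skb, u8 hdr_len, void *to, int len)
	{
		/* returns 0 on success, -EFAULT if the skb is too short */
		if (skb_copy_bits(skb, hdr_len, to, len))
			pr_warn("skb shorter than expected\n");
	}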
index 6685259927f81bd979c20b90d0f9970358c98d46..a86aa5bcee7dd082105b18f56c01009aa910c53f 100644 (file)
@@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
        return 0;
 }
 
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+                               const u32 len)
+{
+       struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+       struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+       if (len < sizeof(*fw_capa))
+               return -EINVAL;
+
+       capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+       capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+       capa->max_ap_cache_per_scan =
+               le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+       capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+       capa->max_scan_reporting_threshold =
+               le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+       capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+       capa->max_significant_change_aps =
+               le32_to_cpu(fw_capa->max_significant_change_aps);
+       capa->max_bssid_history_entries =
+               le32_to_cpu(fw_capa->max_bssid_history_entries);
+       return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        size_t len = ucode_raw->size;
        const u8 *data;
        u32 tlv_len;
+       u32 usniffer_img;
        enum iwl_ucode_tlv_type tlv_type;
        const u8 *tlv_data;
        char buildstr[25];
-       u32 build;
+       u32 build, paging_mem_size;
        int num_of_cpus;
        bool usniffer_images = false;
        bool usniffer_req = false;
+       bool gscan_capa = false;
 
        if (len < sizeof(*ucode)) {
                IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                            IWL_UCODE_REGULAR_USNIFFER,
                                            tlv_len);
                        break;
+               case IWL_UCODE_TLV_PAGING:
+                       if (tlv_len != sizeof(u32))
+                               goto invalid_tlv_len;
+                       paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+                       IWL_DEBUG_FW(drv,
+                                    "Paging: paging enabled (size = %u bytes)\n",
+                                    paging_mem_size);
+
+                       if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+                               IWL_ERR(drv,
+                                       "Paging: driver supports up to %lu bytes for paging image\n",
+                                       MAX_PAGING_IMAGE_SIZE);
+                               return -EINVAL;
+                       }
+
+                       if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+                               IWL_ERR(drv,
+                                       "Paging: image isn't multiple %lu\n",
+                                       FW_PAGING_SIZE);
+                               return -EINVAL;
+                       }
+
+                       drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+                               paging_mem_size;
+                       usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+                       drv->fw.img[usniffer_img].paging_mem_size =
+                               paging_mem_size;
+                       break;
                case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        drv->fw.sdio_adma_addr =
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
+               case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+                       if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+                               goto invalid_tlv_len;
+                       gscan_capa = true;
+                       break;
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
@@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           WARN(!gscan_capa,
+                "GSCAN is supported but capabilities TLV is unavailable\n"))
+               __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+                           capa->_capa);
+
        return 0;
 
  invalid_tlv_len:
index 21302b6f2bfd79a8e8617a345e3771f6608c0145..acc3d186c5c101b0834fb4e424fe2203a66bb2d0 100644 (file)
@@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
        struct ieee80211_channel *chan = &data->channels[0];
        int n = 0, idx = 0;
 
-       while (chan->band != band && idx < n_channels)
+       while (idx < n_channels && chan->band != band)
                chan = &data->channels[++idx];
 
        sband->channels = &data->channels[idx];
 
-       while (chan->band == band && idx < n_channels) {
+       while (idx < n_channels && chan->band == band) {
                chan = &data->channels[++idx];
                n++;
        }
index d45dc021cda2c0715b8d7e740ff90b46589ae141..d56064861a9c353dfb9fcf1720e1abde6c3fcf9d 100644 (file)
@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  *     host memory mapped FH registers
index e57dbd0ef2e1f10f8d16bd26537f2ee829b5eade..af5b3201492cb7a690ec993d784866b1630d91a5 100644 (file)
@@ -84,6 +84,8 @@
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
  * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
  *     Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *     &struct iwl_fw_error_dump_rb
  */
 enum iwl_fw_error_dump_type {
        /* 0 is deprecated */
@@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_FH_REGS = 8,
        IWL_FW_ERROR_DUMP_MEM = 9,
        IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+       IWL_FW_ERROR_DUMP_RB = 11,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem {
        u8 data[];
 };
 
+/**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: reserved field, currently unused
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+       __le32 index;
+       __le32 rxq;
+       __le32 reserved;
+       u8 data[];
+};
+
 /**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
index 926e4568d36c8eac0636340641769f7a6d6d42d9..84653e3d02bab52fc44fbc5b047aa3b5286b0154 100644 (file)
@@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_API_CHANGES_SET   = 29,
        IWL_UCODE_TLV_ENABLED_CAPABILITIES      = 30,
        IWL_UCODE_TLV_N_SCAN_CHANNELS           = 31,
+       IWL_UCODE_TLV_PAGING            = 32,
        IWL_UCODE_TLV_SEC_RT_USNIFFER   = 34,
        IWL_UCODE_TLV_SDIO_ADMA_ADDR    = 35,
        IWL_UCODE_TLV_FW_VERSION        = 36,
        IWL_UCODE_TLV_FW_DBG_DEST       = 38,
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
        IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
+       IWL_UCODE_TLV_FW_GSCAN_CAPA     = 50,
 };
 
 struct iwl_ucode_tlv {
@@ -257,6 +259,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
  *     instead of 3.
+ * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ *     (command version 3) that supports per-chain limits
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
@@ -272,6 +276,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_STATS_V10             = (__force iwl_ucode_tlv_api_t)19,
        IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
        IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY     = (__force iwl_ucode_tlv_api_t)24,
+       IWL_UCODE_TLV_API_TX_POWER_CHAIN        = (__force iwl_ucode_tlv_api_t)27,
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -305,6 +310,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  *     IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *     is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)0,
@@ -326,6 +332,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
+       IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT                = (__force iwl_ucode_tlv_capa_t)31,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -343,8 +350,9 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION       0xAAAABBBB
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
@@ -493,10 +501,13 @@ struct iwl_fw_dbg_conf_hcmd {
  *
  * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
  * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ *     collect only monitor data
  */
 enum iwl_fw_dbg_trigger_mode {
        IWL_FW_DBG_TRIGGER_START = BIT(0),
        IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+       IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -726,4 +737,28 @@ struct iwl_fw_dbg_conf_tlv {
        struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *     change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *     hold.
+ */
+struct iwl_fw_gscan_capabilities {
+       __le32 max_scan_cache_size;
+       __le32 max_scan_buckets;
+       __le32 max_ap_cache_per_scan;
+       __le32 max_rssi_sample_size;
+       __le32 max_scan_reporting_threshold;
+       __le32 max_hotlist_aps;
+       __le32 max_significant_change_aps;
+       __le32 max_bssid_history_entries;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */
index 3e3c9d8b3c37dd93b7ab59529bf5c7b3cd9d7ae0..45e732150d28ab9de90d322528fc8429c2a26d97 100644 (file)
@@ -133,6 +133,7 @@ struct fw_desc {
 struct fw_img {
        struct fw_desc sec[IWL_UCODE_SECTION_MAX];
        bool is_dual_cpus;
+       u32 paging_mem_size;
 };
 
 struct iwl_sf_region {
@@ -140,6 +141,48 @@ struct iwl_sf_region {
        u32 size;
 };
 
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS        0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: physical (DMA) address of the page
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+       dma_addr_t fw_paging_phys;
+       struct page *fw_paging_block;
+       u32 fw_paging_size;
+};
+
 /**
  * struct iwl_fw_cscheme_list - a cipher scheme list
  * @size: a number of entries
@@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list {
        struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
+/**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *     change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *     hold.
+ */
+struct iwl_gscan_capabilities {
+       u32 max_scan_cache_size;
+       u32 max_scan_buckets;
+       u32 max_ap_cache_per_scan;
+       u32 max_rssi_sample_size;
+       u32 max_scan_reporting_threshold;
+       u32 max_hotlist_aps;
+       u32 max_significant_change_aps;
+       u32 max_bssid_history_entries;
+};
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -208,6 +275,7 @@ struct iwl_fw {
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
+       struct iwl_gscan_capabilities gscan_capa;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
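The paging constants added above compose as powers of two: 4 KiB pages, 8 pages per group giving 32 KiB blocks, and 32 blocks per image giving a 1024 KiB ceiling. A compile-time check of that arithmetic, as a sketch under the definitions in the hunk:

	#include <linux/bug.h>

	static inline void paging_sanity(void)
	{
		BUILD_BUG_ON(FW_PAGING_SIZE != 4096);		/* 2^12 */
		BUILD_BUG_ON(PAGING_BLOCK_SIZE != 8 * 4096);	/* 32 KiB */
		/* 32 blocks/image * 32 KiB = 1 MiB max paging image */
		BUILD_BUG_ON(MAX_PAGING_IMAGE_SIZE != 32 * 32768);
	}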
index 71b450adbda0d73d26b08200260e37fee0c76129..b47fe9d6b97abacdc2c72256f261c8ee520da22e 100644 (file)
@@ -116,10 +116,6 @@ struct iwl_cfg;
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *     HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- *     but the higher layers need to know about it (in particular mac80211 to
- *     to able to call the right NAPI RX functions); this function is needed
- *     to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -148,12 +144,8 @@ struct iwl_op_mode_ops {
                                     const struct iwl_fw *fw,
                                     struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
-       void (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb);
-       void (*napi_add)(struct iwl_op_mode *op_mode,
-                        struct napi_struct *napi,
-                        struct net_device *napi_dev,
-                        int (*poll)(struct napi_struct *, int),
-                        int weight);
+       void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+                  struct iwl_rx_cmd_buffer *rxb);
        void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
        void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
        bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -188,9 +180,10 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
 }
 
 static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+                                 struct napi_struct *napi,
                                  struct iwl_rx_cmd_buffer *rxb)
 {
-       return op_mode->ops->rx(op_mode, rxb);
+       return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@@ -258,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
        return op_mode->ops->exit_d0i3(op_mode);
 }
 
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
-                                       struct napi_struct *napi,
-                                       struct net_device *napi_dev,
-                                       int (*poll)(struct napi_struct *, int),
-                                       int weight)
-{
-       if (!op_mode->ops->napi_add)
-               return;
-       op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
 #endif /* __iwl_op_mode_h__ */
index cd98b9f45415b107cdbec91a3b09bd3449ca0a42..3ab777f79e4f60bcf77b00f8331a7b1df69475b2 100644 (file)
@@ -383,6 +383,8 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS  0xA20800
 #define RSA_ENABLE                     0xA24B08
 #define PREG_AUX_BUS_WPROT_0           0xA04CC0
+#define SB_CPU_1_STATUS                        0xA01E30
+#define SB_CPU_2_STATUS                        0xA01E34
 
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
@@ -390,4 +392,10 @@ enum {
        LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
+/* FW paging notification */
+#define LMPM_PAGE_PASS_NOTIF                   0xA03824
+enum {
+       LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
 #endif                         /* __iwl_prph_h__ */
index 2f79e54823c476bff7e5ad0a8501f7966b4803ab..c829c505e1419cf3ce84356f9a07c4b85b8ae447 100644 (file)
@@ -248,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *     (i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ *     check that we leave enough room for the TB bitmap, which needs 20 bits.
  */
 enum CMD_MODE {
        CMD_ASYNC               = BIT(0),
@@ -257,6 +259,8 @@ enum CMD_MODE {
        CMD_SEND_IN_IDLE        = BIT(4),
        CMD_MAKE_TRANS_IDLE     = BIT(5),
        CMD_WAKE_UP_TRANS       = BIT(6),
+
+       CMD_TB_BITMAP_POS       = 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
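
Per the @CMD_TB_BITMAP_POS note above, bits 11..30 of the 32-bit flags word are reserved for the 20-bit TB bitmap, so every CMD_* flag has to stay at or below BIT(10). A standalone compile-time sketch of that invariant (the 20-bit width is taken from the comment above, not from a header):

    /* compile-time check that the TB bitmap fits above the CMD_* flags */
    #define CMD_TB_BITMAP_POS    11
    #define CMD_TB_BITMAP_BITS   20 /* per the comment above */
    #define CMD_HIGHEST_FLAG_BIT 6  /* CMD_WAKE_UP_TRANS = BIT(6) */

    _Static_assert(CMD_TB_BITMAP_POS + CMD_TB_BITMAP_BITS <= 32,
                   "TB bitmap must fit in the 32-bit flags word");
    _Static_assert(CMD_HIGHEST_FLAG_BIT < CMD_TB_BITMAP_POS,
                   "CMD_* flags must sit below the TB bitmap");

    int main(void) { return 0; }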
@@ -604,7 +608,9 @@ struct iwl_trans_ops {
        int  (*suspend)(struct iwl_trans *trans);
        void (*resume)(struct iwl_trans *trans);
 
-       struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+       struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+                                                struct iwl_fw_dbg_trigger_tlv
+                                                *trigger);
 };
 
 /**
@@ -641,6 +647,8 @@ enum iwl_d0i3_mode {
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *     0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  * @hw_id: a u32 with the ID of the device / sub-device.
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -660,6 +668,12 @@ enum iwl_d0i3_mode {
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location where the FW will upload / download the pages
+ *     from. The address is set by the opmode.
+ * @paging_db: Pointer to the opmode paging database; the pointer is set by
+ *     the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ *     downloading them to the FW. The buffer is allocated in the opmode.
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
@@ -669,6 +683,7 @@ struct iwl_trans {
        unsigned long status;
 
        struct device *dev;
+       u32 max_skb_frags;
        u32 hw_rev;
        u32 hw_id;
        char hw_id_str[52];
@@ -696,6 +711,14 @@ struct iwl_trans {
        struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u8 dbg_dest_reg_num;
 
+       /*
+        * Paging parameters - All of the parameters should be set by the
+        * opmode when paging is enabled
+        */
+       u32 paging_req_addr;
+       struct iwl_fw_paging *paging_db;
+       void *paging_download_buf;
+
        enum iwl_d0i3_mode d0i3_mode;
 
        bool wowlan_d0i3;
@@ -787,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
        might_sleep();
-       trans->ops->d3_suspend(trans, test);
+       if (trans->ops->d3_suspend)
+               trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -795,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
                                      bool test)
 {
        might_sleep();
+       if (!trans->ops->d3_resume)
+               return 0;
+
        return trans->ops->d3_resume(trans, status, test);
 }
 
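
d3_suspend/d3_resume become optional transport ops in the hunk above: the inline wrappers NULL-check before calling, and a missing d3_resume is treated as a successful no-op. The same guard pattern in a minimal standalone sketch:

    #include <stdio.h>

    struct trans_ops {
        int (*d3_resume)(void); /* optional - may be NULL */
    };

    static int trans_d3_resume(const struct trans_ops *ops)
    {
        if (!ops->d3_resume)
            return 0; /* missing op == successful no-op */
        return ops->d3_resume();
    }

    int main(void)
    {
        struct trans_ops ops = { 0 };
        printf("%d\n", trans_d3_resume(&ops)); /* 0: safely skipped */
        return 0;
    }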
@@ -825,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+                   struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        if (!trans->ops->dump_data)
                return NULL;
-       return trans->ops->dump_data(trans);
+       return trans->ops->dump_data(trans, trigger);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
index ddb1c844827b9f1b8bdbd8223a20f6f85750e4cb..383a3162046c143129308f906269dbeba95bac16 100644 (file)
@@ -911,9 +911,9 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
                int size = sizeof(struct iwl_tof_range_req_ap_entry);
                u16 burst_period;
                u8 *mac = ap.bssid;
-               int i;
+               unsigned int i;
 
-               if (sscanf(data, "%d %hhd %hhx %hhx"
+               if (sscanf(data, "%u %hhd %hhx %hhx"
                           "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
                           "%hhx %hhx %hx"
                           "%hhx %hhx %x"
@@ -929,7 +929,7 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
                        ret = -EINVAL;
                        goto out;
                }
-               if (i > IWL_MVM_TOF_MAX_APS) {
+               if (i >= IWL_MVM_TOF_MAX_APS) {
                        IWL_ERR(mvm, "Invalid AP index %d\n", i);
                        ret = -EINVAL;
                        goto out;
index ffb4b5cef27570bbe4af0670683a41891b673330..7d69a556bcc820861692b2349d85d470eec69ca0 100644 (file)
@@ -949,9 +949,10 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
                                           char *buf, size_t count,
                                           loff_t *ppos)
 {
-       int ret, conf_id;
+       unsigned int conf_id;
+       int ret;
 
-       ret = kstrtoint(buf, 0, &conf_id);
+       ret = kstrtouint(buf, 0, &conf_id);
        if (ret)
                return ret;
 
@@ -974,7 +975,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
@@ -1200,12 +1201,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
        if (ptr) {
                for (ofs = 0; ofs < len; ofs += 16) {
                        pos += scnprintf(buf + pos, bufsz - pos,
-                                        "0x%.4x ", ofs);
-                       hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
-                                          bufsz - pos, false);
-                       pos += strlen(buf + pos);
-                       if (bufsz - pos > 0)
-                               buf[pos++] = '\n';
+                                        "0x%.4x %16ph\n", ofs, ptr + ofs);
                }
        } else {
                pos += scnprintf(buf + pos, bufsz - pos,
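
The rewrite above replaces the hex_dump_to_buffer() dance with a single scnprintf() using the kernel's %ph printf extension, which prints a buffer as space-separated hex bytes (the field width, 16 here, is the byte count; the kernel caps it at 64). A userspace approximation of what one such dump row looks like:

    #include <stdio.h>
    #include <stdint.h>

    /* userspace approximation of the kernel's "%16ph" specifier */
    static void print_16ph(const uint8_t *p)
    {
        for (int i = 0; i < 16; i++)
            printf("%02x%s", p[i], i == 15 ? "" : " ");
    }

    int main(void)
    {
        uint8_t row[16] = { 0xde, 0xad, 0xbe, 0xef };

        printf("0x%.4x ", 0); /* offset prefix, as in the dbgfs code above */
        print_16ph(row);
        printf("\n");
        return 0;
    }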
index d7658d16e965c835144d8804d4e7185d605e8bfd..20521bebb0b15b8d5cc154b785d78865ad599e0c 100644 (file)
@@ -339,8 +339,13 @@ enum iwl_wowlan_wakeup_reason {
        IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE                 = BIT(8),
        IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS                 = BIT(9),
        IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE           = BIT(10),
-       /* BIT(11) reserved */
+       IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL              = BIT(11),
        IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET             = BIT(12),
+       IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET                  = BIT(13),
+       IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER               = BIT(14),
+       IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN                = BIT(15),
+       IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN                 = BIT(16),
+
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
 struct iwl_wowlan_gtk_status {
index b86b1697d56f6807011faa40188094f47140a99a..7005fa4be74a18f536762c687529085452d89f49 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -310,17 +312,22 @@ struct iwl_reduce_tx_power_cmd {
        __le16 pwr_restriction;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
 
+enum iwl_dev_tx_power_cmd_mode {
+       IWL_TX_POWER_MODE_SET_MAC = 0,
+       IWL_TX_POWER_MODE_SET_DEVICE = 1,
+       IWL_TX_POWER_MODE_SET_CHAINS = 2,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */
+
 /**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * REDUCE_TX_POWER_CMD = 0x9f
- * @set_mode: 0 - MAC tx power, 1 - device tx power
+ * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
  * @mac_context_id: id of the mac ctx for which we are reducing TX power.
  * @pwr_restriction: TX power restriction in 1/8 dBms.
  * @dev_24: device TX power restriction in 1/8 dBms
  * @dev_52_low: device TX power restriction upper band - low
  * @dev_52_high: device TX power restriction upper band - high
  */
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v2 {
        __le32 set_mode;
        __le32 mac_context_id;
        __le16 pwr_restriction;
@@ -329,6 +336,20 @@ struct iwl_dev_tx_power_cmd {
        __le16 dev_52_high;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
 
+#define IWL_NUM_CHAIN_LIMITS   2
+#define IWL_NUM_SUB_BANDS      5
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v2: version 2 of the command, embedded here for easier software handling
+ * @per_chain_restriction: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd {
+       /* v3 is just an extension of v2 - keep this here */
+       struct iwl_dev_tx_power_cmd_v2 v2;
+       __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
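Embedding v2 as the first member keeps v3 binary compatible: sending only sizeof(cmd.v2) bytes of a struct iwl_dev_tx_power_cmd yields a valid v2 payload, which is exactly what the mac80211 hunk further down does when the chain API is absent. A standalone sketch of the prefix-length trick (the real structs are __packed with little-endian fields; this sketch ignores that):

    #include <stdio.h>
    #include <stdint.h>

    struct cmd_v2 { uint32_t set_mode, mac_context_id; uint16_t pwr; };
    struct cmd_v3 { struct cmd_v2 v2; uint16_t per_chain[2][5]; };

    int main(void)
    {
        struct cmd_v3 cmd = { .v2 = { .set_mode = 0 } };

        /* an old-API device receives only the v2 prefix of the same struct */
        printf("v2 len %zu, v3 len %zu\n", sizeof(cmd.v2), sizeof(cmd));
        return 0;
    }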
 #define IWL_DEV_MAX_TX_POWER 0x7FFF
 
 /**
index 21dd5b771660f0f689798b650fbb4acd1eed3d21..493a8bdfbc9e4bb89180bc75bdd462d88b67255a 100644 (file)
@@ -366,8 +366,8 @@ struct iwl_mvm_rm_sta_cmd {
  * ( MGMT_MCAST_KEY = 0x1f )
  * @ctrl_flags: %iwl_sta_key_flag
  * @IGTK:
- * @K1: IGTK master key
- * @K2: IGTK sub key
+ * @K1: unused
+ * @K2: unused
  * @sta_id: station ID that support IGTK
  * @key_id:
  * @receive_seq_cnt: initial RSC/PN needed for replay check
index 81c4ea3c6958440b105c239f755b85a6a36fe73c..853698ab8b05cc958105755073953c8d999ffaad 100644 (file)
@@ -124,6 +124,18 @@ enum iwl_tx_flags {
        TX_CMD_FLG_HCCA_CHUNK           = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspends sleep mode for 100 TU
+ * @PM_FRAME_ASSOC: fw suspends sleep mode for 10 sec
+ */
+enum iwl_tx_pm_timeouts {
+       PM_FRAME_NONE           = 0,
+       PM_FRAME_MGMT           = 2,
+       PM_FRAME_ASSOC          = 3,
+};
+
 /*
  * TX command security control
  */
index 4e29c11cc96962e494061f90bbfc0da96fd48c21..4af7513adda22e3a08a59361313659ba0ff8d450 100644 (file)
@@ -120,6 +120,9 @@ enum {
        ADD_STA = 0x18,
        REMOVE_STA = 0x19,
 
+       /* paging get item */
+       FW_GET_ITEM_CMD = 0x1a,
+
        /* TX */
        TX_CMD = 0x1c,
        TXPATH_FLUSH = 0x1e,
@@ -149,6 +152,9 @@ enum {
 
        LQ_CMD = 0x4e,
 
+       /* paging block to FW cpu2 */
+       FW_PAGING_BLOCK_CMD = 0x4f,
+
        /* Scan offload */
        SCAN_OFFLOAD_REQUEST_CMD = 0x51,
        SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -370,6 +376,50 @@ struct iwl_nvm_access_cmd {
        u8 data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
+#define NUM_OF_FW_PAGING_BLOCKS        33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send the driver's paging layout to the FW.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size, as a power of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+ */
+struct iwl_fw_paging_cmd {
+       __le32 flags;
+       __le32 block_size;
+       __le32 block_num;
+       __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * FW item IDs
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload /
+ *     download
+ */
+enum iwl_fw_item_id {
+       IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+       __le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+       __le32 item_id;
+       __le32 item_byte_cnt;
+       __le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
 /**
  * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
  * @offset: offset in bytes into the section
index 106edc78c8bc076177ef45091871b3c65836c5e5..4a0ce83315bdd212d1714956af8900ea271f62b6 100644 (file)
@@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+       int i;
+
+       if (!mvm->fw_paging_db[0].fw_paging_block)
+               return;
+
+       for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+               if (!mvm->fw_paging_db[i].fw_paging_block) {
+                       IWL_DEBUG_FW(mvm,
+                                    "Paging: block %d already freed, continue to next page\n",
+                                    i);
+
+                       continue;
+               }
+
+               __free_pages(mvm->fw_paging_db[i].fw_paging_block,
+                            get_order(mvm->fw_paging_db[i].fw_paging_size));
+       }
+       kfree(mvm->trans->paging_download_buf);
+       memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+       int sec_idx, idx;
+       u32 offset = 0;
+
+       /*
+        * find where the paging image starts:
+        * if CPU2 exists and is in paging format, the image looks like:
+        * CPU1 sections (2 or more)
+        * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
+        * CPU2 sections (not paged)
+        * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
+        * sections from the CPU2 paging section
+        * CPU2 paging CSS
+        * CPU2 paging image (including instruction and data)
+        */
+       for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+               if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+                       sec_idx++;
+                       break;
+               }
+       }
+
+       if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+               IWL_ERR(mvm, "driver didn't find paging image\n");
+               iwl_free_fw_paging(mvm);
+               return -EINVAL;
+       }
+
+       /* copy the CSS block to the DRAM */
+       IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+                    sec_idx);
+
+       memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+              image->sec[sec_idx].data,
+              mvm->fw_paging_db[0].fw_paging_size);
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: copied %d CSS bytes to first block\n",
+                    mvm->fw_paging_db[0].fw_paging_size);
+
+       sec_idx++;
+
+       /*
+        * copy the paging blocks to the DRAM
+        * the loop index starts from 1 since the CSS block was already
+        * copied to DRAM and the CSS index is 0.
+        * the loop stops at num_of_paging_blk since the last block is not
+        * full.
+        */
+       for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      mvm->fw_paging_db[idx].fw_paging_size);
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: copied %d paging bytes to block %d\n",
+                            mvm->fw_paging_db[idx].fw_paging_size,
+                            idx);
+
+               offset += mvm->fw_paging_db[idx].fw_paging_size;
+       }
+
+       /* copy the last paging block */
+       if (mvm->num_of_pages_in_last_blk > 0) {
+               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: copied %d pages in the last block %d\n",
+                            mvm->num_of_pages_in_last_blk, idx);
+       }
+
+       return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+                                  const struct fw_img *image)
+{
+       struct page *block;
+       dma_addr_t phys = 0;
+       int blk_idx = 0;
+       int order, num_of_pages;
+       int dma_enabled;
+
+       if (mvm->fw_paging_db[0].fw_paging_block)
+               return 0;
+
+       dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+       /* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
+       BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+       num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+       mvm->num_of_paging_blk = ((num_of_pages - 1) /
+                                   NUM_OF_PAGE_PER_GROUP) + 1;
+
+       mvm->num_of_pages_in_last_blk =
+               num_of_pages -
+               NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+                    mvm->num_of_paging_blk,
+                    mvm->num_of_pages_in_last_blk);
+
+       /* allocate a 4KB block for the paging CSS */
+       order = get_order(FW_PAGING_SIZE);
+       block = alloc_pages(GFP_KERNEL, order);
+       if (!block) {
+               /* free all the previous pages since we failed */
+               iwl_free_fw_paging(mvm);
+               return -ENOMEM;
+       }
+
+       mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+       mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+       if (dma_enabled) {
+               phys = dma_map_page(mvm->trans->dev, block, 0,
+                                   PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(mvm->trans->dev, phys)) {
+                       /*
+                        * free the previous pages and the current one since
+                        * we failed to map_page.
+                        */
+                       iwl_free_fw_paging(mvm);
+                       return -ENOMEM;
+               }
+               mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+       } else {
+               mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+                       blk_idx << BLOCK_2_EXP_SIZE;
+       }
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+                    order);
+
+       /*
+        * allocate blocks in DRAM.
+        * since the CSS is allocated in fw_paging_db[0], the loop starts
+        * from index 1
+        */
+       for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               /* allocate a PAGING_BLOCK_SIZE (32KB) block */
+               order = get_order(PAGING_BLOCK_SIZE);
+               block = alloc_pages(GFP_KERNEL, order);
+               if (!block) {
+                       /* free all the previous pages since we failed */
+                       iwl_free_fw_paging(mvm);
+                       return -ENOMEM;
+               }
+
+               mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+               mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+               if (dma_enabled) {
+                       phys = dma_map_page(mvm->trans->dev, block, 0,
+                                           PAGE_SIZE << order,
+                                           DMA_BIDIRECTIONAL);
+                       if (dma_mapping_error(mvm->trans->dev, phys)) {
+                               /*
+                                * free the previous pages and the current one
+                                * since we failed to map_page.
+                                */
+                               iwl_free_fw_paging(mvm);
+                               return -ENOMEM;
+                       }
+                       mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+               } else {
+                       mvm->fw_paging_db[blk_idx].fw_paging_phys =
+                               PAGING_ADDR_SIG |
+                               blk_idx << BLOCK_2_EXP_SIZE;
+               }
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+                            order);
+       }
+
+       return 0;
+}
+
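The block-count arithmetic in iwl_alloc_fw_paging_mem() above is a ceiling division plus a remainder. A worked standalone example, using the 4KB page and 8-pages-per-32KB-block sizes stated in the comments and debug messages above (the image size is just an illustrative value):

    #include <stdio.h>

    #define FW_PAGING_SIZE        4096 /* one 4KB firmware page */
    #define NUM_OF_PAGE_PER_GROUP 8    /* 8 pages per 32KB block */

    int main(void)
    {
        unsigned int paging_mem_size = 180 * 1024; /* example image size */
        unsigned int num_of_pages = paging_mem_size / FW_PAGING_SIZE; /* 45 */
        unsigned int num_of_blk =
            (num_of_pages - 1) / NUM_OF_PAGE_PER_GROUP + 1;
        unsigned int pages_in_last_blk =
            num_of_pages - NUM_OF_PAGE_PER_GROUP * (num_of_blk - 1);

        /* 45 pages -> 6 blocks, 5 pages in the last (partial) block */
        printf("%u blocks, %u pages in last block\n",
               num_of_blk, pages_in_last_blk);
        return 0;
    }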
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+                             const struct fw_img *fw)
+{
+       int ret;
+
+       ret = iwl_alloc_fw_paging_mem(mvm, fw);
+       if (ret)
+               return ret;
+
+       return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send the paging cmd to the FW in case CPU2 has a paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+       int blk_idx;
+       __le32 dev_phy_addr;
+       struct iwl_fw_paging_cmd fw_paging_cmd = {
+               .flags =
+                       cpu_to_le32(PAGING_CMD_IS_SECURED |
+                                   PAGING_CMD_IS_ENABLED |
+                                   (mvm->num_of_pages_in_last_blk <<
+                                   PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+               .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+               .block_num = cpu_to_le32(mvm->num_of_paging_blk),
+       };
+
+       /* loop over all paging blocks + the CSS block */
+       for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               dev_phy_addr =
+                       cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+                                   PAGE_2_EXP_SIZE);
+               fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
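The per-block address handed to the FW above is the physical address shifted down by the page exponent, i.e. a page frame number. A standalone sketch of the conversion, assuming PAGE_2_EXP_SIZE is the log2 of the 4KB firmware page mentioned above (the real constant lives in the driver headers):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_2_EXP_SIZE 12 /* assumed: log2 of a 4KB firmware page */

    int main(void)
    {
        uint64_t phys = 0x1234f000ULL; /* example block DMA address */
        uint32_t dev_addr = (uint32_t)(phys >> PAGE_2_EXP_SIZE);

        printf("0x%x\n", dev_addr); /* 0x1234f: a 4KB frame number */
        return 0;
    }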
+/*
+ * Send the get-item cmd to the FW to retrieve the paging item address,
+ * in case CPU2 has a paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+       int ret;
+       struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+               .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+       };
+
+       struct iwl_fw_get_item_resp *item_resp;
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+               .data = { &fw_get_item_cmd, },
+       };
+
+       cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm,
+                       "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+                       ret);
+               return ret;
+       }
+
+       item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+       if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+               IWL_ERR(mvm,
+                       "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+                       le32_to_cpu(item_resp->item_id));
+               ret = -EIO;
+               goto exit;
+       }
+
+       mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+                                                 GFP_KERNEL);
+       if (!mvm->trans->paging_download_buf) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+       mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+       mvm->trans->paging_db = mvm->fw_paging_db;
+       IWL_DEBUG_FW(mvm,
+                    "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+                    mvm->trans->paging_req_addr);
+
+exit:
+       iwl_free_resp(&cmd);
+
+       return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
 {
@@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
+               if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+                       IWL_ERR(mvm,
+                               "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+                               iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+                               iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }
@@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
+       /*
+        * configure and operate the fw paging mechanism.
+        * the driver configures the paging flow only once; the CPU2 paging
+        * image is included in the IWL_UCODE_INIT image.
+        */
+       if (fw->paging_mem_size) {
+               /*
+                * When DMA is not enabled, the driver needs to copy / write
+                * the downloaded / uploaded page to / from the SMEM.
+                * This gets the location where the pages are stored.
+                */
+               if (!is_device_dma_capable(mvm->trans->dev)) {
+                       ret = iwl_trans_get_paging_item(mvm);
+                       if (ret) {
+                               IWL_ERR(mvm, "failed to get FW paging item\n");
+                               return ret;
+                       }
+               }
+
+               ret = iwl_save_fw_paging(mvm, fw);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to save the FW paging image\n");
+                       return ret;
+               }
+
+               ret = iwl_send_paging_cmd(mvm, fw);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to send the paging cmd\n");
+                       iwl_free_fw_paging(mvm);
+                       return ret;
+               }
+       }
+
        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
@@ -472,8 +811,13 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                                struct iwl_mvm_dump_desc *desc,
-                               unsigned int delay)
+                               struct iwl_fw_dbg_trigger_tlv *trigger)
 {
+       unsigned int delay = 0;
+
+       if (trigger)
+               delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
        if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
                return -EBUSY;
 
@@ -484,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                 le32_to_cpu(desc->trig_desc.type));
 
        mvm->fw_dump_desc = desc;
+       mvm->fw_dump_trig = trigger;
 
        queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
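
The stop_delay field in the trigger TLV is in milliseconds; msecs_to_jiffies() converts it to scheduler ticks for queue_delayed_work() above. A simplified standalone version of the conversion (the real kernel helper rounds partial ticks up; HZ=250 here is just an illustrative tick rate):

    #include <stdio.h>

    #define HZ 250 /* illustrative tick rate */

    /* simplified msecs_to_jiffies: exact for multiples of 1000/HZ */
    static unsigned long msecs_to_jiffies(unsigned int ms)
    {
        return (unsigned long)ms * HZ / 1000;
    }

    int main(void)
    {
        printf("%lu\n", msecs_to_jiffies(500)); /* 125 ticks at HZ=250 */
        return 0;
    }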
@@ -491,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-                          const char *str, size_t len, unsigned int delay)
+                          const char *str, size_t len,
+                          struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_mvm_dump_desc *desc;
 
@@ -503,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
        desc->trig_desc.type = cpu_to_le32(trig);
        memcpy(desc->trig_desc.data, str, len);
 
-       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
 }
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
                                const char *fmt, ...)
 {
-       unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
        u16 occurrences = le16_to_cpu(trigger->occurrences);
        int ret, len = 0;
        char buf[64];
@@ -534,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                len = strlen(buf) + 1;
        }
 
-       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
-                                    len, delay);
+       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+                                    trigger);
+
        if (ret)
                return ret;
 
index 9e641847c0472a12ced4d44bc994e51706e3e3bb..aa8c2b7f23c73862f0526f109ad647cc0c80a259 100644 (file)
@@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                        IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
                IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+               ieee80211_hw_set(hw, TDLS_WIDER_BW);
        }
 
        if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -1124,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        u32 file_len, fifo_data_len = 0;
        u32 smem_len = mvm->cfg->smem_len;
        u32 sram2_len = mvm->cfg->dccm2_len;
+       bool monitor_dump_only = false;
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (mvm->fw_dump_trig &&
+           mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+               monitor_dump_only = true;
+
        fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
        if (!fw_error_dump)
                return;
@@ -1178,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                   fifo_data_len +
                   sizeof(*dump_info);
 
+       /* Make room for the SMEM, if it exists */
+       if (smem_len)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+       /* Make room for the secondary SRAM, if it exists */
+       if (sram2_len)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+       /* If we only want a monitor dump, reset the file length */
+       if (monitor_dump_only) {
+               file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+                          sizeof(*dump_info);
+       }
+
        /*
         * In 8000 HW family B-step include the ICCM (which resides separately)
         */
@@ -1190,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
 
-       /* Make room for the SMEM, if it exists */
-       if (smem_len)
-               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-       /* Make room for the secondary SRAM, if it exists */
-       if (sram2_len)
-               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
@@ -1243,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_data = iwl_fw_error_next_data(dump_data);
        }
 
+       /* In case we only want a monitor dump, skip to dumping the transport data */
+       if (monitor_dump_only)
+               goto dump_trans_data;
+
        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
        dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
        dump_mem = (void *)dump_data->data;
@@ -1286,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                                         dump_mem->data, IWL8260_ICCM_LEN);
        }
 
-       fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+       fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+                                                      mvm->fw_dump_trig);
        fw_error_dump->op_mode_len = file_len;
        if (fw_error_dump->trans_ptr)
                file_len += fw_error_dump->trans_ptr->len;
@@ -1295,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
                      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
+       mvm->fw_dump_trig = NULL;
        clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
@@ -1576,20 +1595,23 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                s16 tx_power)
 {
        struct iwl_dev_tx_power_cmd cmd = {
-               .set_mode = 0,
-               .mac_context_id =
+               .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+               .v2.mac_context_id =
                        cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
-               .pwr_restriction = cpu_to_le16(8 * tx_power),
+               .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
        };
+       int len = sizeof(cmd);
 
        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
                return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
-               cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+               cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 
-       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
-                                   sizeof(cmd), &cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
+               len = sizeof(cmd.v2);
+
+       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
index 95f326dc0b1ff4b373a8362899403a0c3d875e66..b95a07ec9e362bf031f960dee35dc52c97221ed0 100644 (file)
@@ -610,6 +610,11 @@ struct iwl_mvm {
        /* NVM sections */
        struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
+       /* Paging section */
+       struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+       u16 num_of_paging_blk;
+       u16 num_of_pages_in_last_blk;
+
        /* EEPROM MAC addresses */
        struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 
@@ -703,6 +708,7 @@ struct iwl_mvm {
        u8 fw_dbg_conf;
        struct delayed_work fw_dump_wk;
        struct iwl_mvm_dump_desc *fw_dump_desc;
+       struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
 
 #ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
@@ -1078,7 +1084,8 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+                       struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
@@ -1438,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-                          const char *str, size_t len, unsigned int delay);
+                          const char *str, size_t len,
+                          struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                                struct iwl_mvm_dump_desc *desc,
-                               unsigned int delay);
+                               struct iwl_fw_dbg_trigger_tlv *trigger);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
index 6957d026e4bd1cd442637001551b44dc28d24101..a37de3f410a01e0b594f41d43bfda4360273e324 100644 (file)
@@ -268,7 +268,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 #undef RX_HANDLER_GRP
 #define CMD(x) [x] = #x
 
-static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
        CMD(MVM_ALIVE),
        CMD(REPLY_ERROR),
        CMD(INIT_COMPLETE_NOTIF),
@@ -288,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(PHY_CONFIGURATION_CMD),
        CMD(CALIB_RES_NOTIF_PHY_DB),
        CMD(SET_CALIB_DEFAULT_CMD),
+       CMD(FW_PAGING_BLOCK_CMD),
        CMD(ADD_STA_KEY),
        CMD(ADD_STA),
+       CMD(FW_GET_ITEM_CMD),
        CMD(REMOVE_STA),
        CMD(LQ_CMD),
        CMD(SCAN_OFFLOAD_CONFIG_CMD),
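
The array size bump to REPLY_MAX + 1 in the hunk above matters because the CMD() designated initializers index by command ID, and an entry at index REPLY_MAX would fall one past the end of a REPLY_MAX-sized array. A standalone illustration of the off-by-one (MAX_ID is a stand-in for REPLY_MAX):

    #include <stdio.h>

    #define MAX_ID 0xff        /* stand-in for REPLY_MAX */
    #define CMD(x) [x] = #x

    /* size MAX_ID + 1 so an initializer at index MAX_ID stays in bounds */
    static const char *const cmd_strings[MAX_ID + 1] = {
        CMD(MAX_ID),
    };

    int main(void)
    {
        printf("%s\n", cmd_strings[MAX_ID]); /* prints "MAX_ID" */
        return 0;
    }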
@@ -715,6 +717,7 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
 }
 
 static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+                               struct napi_struct *napi,
                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -722,7 +725,7 @@ static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
        u8 i;
 
        if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
-               iwl_mvm_rx_rx_mpdu(mvm, rxb);
+               iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
                return;
        }
 
@@ -913,7 +916,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * can't recover this since we're already half suspended.
         */
        if (!mvm->restart_fw && fw_error) {
-               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+                                           NULL);
        } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                    &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
@@ -1110,9 +1114,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 
        IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 
-       /* make sure we have no running tx while configuring the qos */
        set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-       synchronize_net();
 
        /*
         * iwl_mvm_ref_sync takes a reference before checking the flag.
@@ -1140,6 +1142,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
                mvm->d0i3_offloading = false;
        }
 
+       /* make sure we have no running tx while configuring the seqno */
+       synchronize_net();
+
        iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
        ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
                                   sizeof(wowlan_config_cmd),
@@ -1166,15 +1171,25 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
        iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 }
 
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
-                                        struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+       struct iwl_mvm *mvm;
+       u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+                                           struct ieee80211_vif *vif)
 {
-       struct iwl_mvm *mvm = data;
+       struct iwl_mvm_wakeup_reason_iter_data *data = _data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
        if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
-           mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-               iwl_mvm_connection_loss(mvm, vif, "D0i3");
+           data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+               if (data->wakeup_reasons &
+                   IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+                       iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+               else
+                       ieee80211_beacon_loss(vif);
+       }
 }
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1242,7 +1257,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        };
        struct iwl_wowlan_status *status;
        int ret;
-       u32 disconnection_reasons, wakeup_reasons;
+       u32 handled_reasons, wakeup_reasons;
        __le16 *qos_seq = NULL;
 
        mutex_lock(&mvm->mutex);
@@ -1259,13 +1274,18 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 
        IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 
-       disconnection_reasons =
-               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
-       if (wakeup_reasons & disconnection_reasons)
+       handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+                               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+       if (wakeup_reasons & handled_reasons) {
+               struct iwl_mvm_wakeup_reason_iter_data data = {
+                       .mvm = mvm,
+                       .wakeup_reasons = wakeup_reasons,
+               };
+
                ieee80211_iterate_active_interfaces(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                       iwl_mvm_d0i3_disconnect_iter, mvm);
+                       iwl_mvm_d0i3_wakeup_reason_iter, &data);
+       }
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
@@ -1318,17 +1338,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
        return _iwl_mvm_exit_d0i3(mvm);
 }
 
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
-                            struct napi_struct *napi,
-                            struct net_device *napi_dev,
-                            int (*poll)(struct napi_struct *, int),
-                            int weight)
-{
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-       ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .start = iwl_op_mode_mvm_start,
        .stop = iwl_op_mode_mvm_stop,
@@ -1342,5 +1351,4 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .nic_config = iwl_mvm_nic_config,
        .enter_d0i3 = iwl_mvm_enter_d0i3,
        .exit_d0i3 = iwl_mvm_exit_d0i3,
-       .napi_add = iwl_mvm_napi_add,
 };
index 506294fc2f878c037464112c307be2bb202deda6..4645877882a6af6ee51a87838f9f3b57e3ab0054 100644 (file)
@@ -288,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
        return true;
 }
 
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
-       int numerator;
-       int dtim_interval = dtimper * bi;
-
-       if (WARN_ON(!dtim_interval))
-               return 0;
-
-       if (dtimper == 1) {
-               if (bi > 100)
-                       numerator = 408;
-               else
-                       numerator = 510;
-       } else if (dtimper < 10) {
-               numerator = 612;
-       } else {
-               return 0;
-       }
-       return max(1, (numerator / dtim_interval));
-}
-
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
@@ -358,8 +337,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-       if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-           !mvmvif->pm_enabled)
+       if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+           (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -378,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        if (!radar_detect && (dtimper < 10) &&
            (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
             mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-               cmd->skip_dtim_periods =
-                       iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
-               if (cmd->skip_dtim_periods)
-                       cmd->flags |=
-                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->skip_dtim_periods = 3;
        }
 
        if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
index 19a79262e0a0ea98c80139188ccac09f07b1ff87..5ae9c8aa868fa445063e956a26ac1444caf5d8dd 100644 (file)
@@ -177,7 +177,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-       if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+       if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+           iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
                return false;
 
        if (mvm->nvm_data->sku_cap_mimo_disabled)
index 65746145273e17dd49323fa97edc8f1be7b61e94..c37c10a423ce16b985ce620d2162d58c9c736fad 100644 (file)
@@ -94,6 +94,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
  * Adds the rxb to a new skb and give it to mac80211
  */
 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+                                           struct napi_struct *napi,
                                            struct sk_buff *skb,
                                            struct ieee80211_hdr *hdr, u16 len,
                                            u32 ampdu_status, u8 crypt_len,
@@ -127,7 +128,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
                                fraglen, rxb->truesize);
        }
 
-       ieee80211_rx(mvm->hw, skb);
+       ieee80211_rx_napi(mvm->hw, skb, napi);
 }
 
 /*
@@ -253,7 +254,8 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
  *
  * Handles the actual data of the Rx packet from the fw
  */
-void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+                       struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_rx_status *rx_status;
@@ -442,7 +444,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
        iwl_mvm_update_frame_stats(mvm, rate_n_flags,
                                   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
-       iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+       iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
                                        crypt_len, rxb);
 }
 
index 95678e773c6ff0bf188624392c8b7f0a54f86cd0..56559d4d34ad1d0b70eecdccebdee2a5828921e6 100644 (file)
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
-struct iwl_mvm_scan_params {
-       u32 max_out_time;
+enum iwl_mvm_scan_type {
+       IWL_SCAN_TYPE_UNASSOC,
+       IWL_SCAN_TYPE_WILD,
+       IWL_SCAN_TYPE_MILD,
+       IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+       IWL_MVM_TRAFFIC_LOW,
+       IWL_MVM_TRAFFIC_MEDIUM,
+       IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+       u32 dwell_active;
+       u32 dwell_passive;
+       u32 dwell_fragmented;
        u32 suspend_time;
-       bool passive_fragmented;
+       u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+       [IWL_SCAN_TYPE_UNASSOC] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 0,
+               .max_out_time = 0,
+       },
+       [IWL_SCAN_TYPE_WILD] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 30,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_MILD] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 120,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_FRAGMENTED] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 95,
+               .max_out_time = 44,
+       },
+};
+
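With the dwell and timing values folded into the scan_timing table above, the per-scan-type lookup becomes a single array index, as the lmac/umac dwell helpers further down do. A minimal standalone sketch of the lookup pattern (field subset and values copied from the table above):

    #include <stdio.h>

    enum scan_type { TYPE_UNASSOC, TYPE_WILD, TYPE_MILD, TYPE_FRAGMENTED };

    struct timing { unsigned dwell_active, suspend_time, max_out_time; };

    static const struct timing scan_timing[] = {
        [TYPE_UNASSOC]    = { 10,   0,   0 },
        [TYPE_WILD]       = { 10,  30, 120 },
        [TYPE_MILD]       = { 10, 120, 120 },
        [TYPE_FRAGMENTED] = { 10,  95,  44 },
    };

    int main(void)
    {
        enum scan_type type = TYPE_FRAGMENTED;

        /* one table lookup replaces the old per-field computation */
        printf("active %u suspend %u max_out %u\n",
               scan_timing[type].dwell_active,
               scan_timing[type].suspend_time,
               scan_timing[type].max_out_time);
        return 0;
    }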
+struct iwl_mvm_scan_params {
+       enum iwl_mvm_scan_type type;
        u32 n_channels;
        u16 delay;
        int n_ssids;
@@ -90,13 +140,7 @@ struct iwl_mvm_scan_params {
        int n_match_sets;
        struct iwl_scan_probe_req preq;
        struct cfg80211_match_set *match_sets;
-       u16 passive_dwell;
-       u16 active_dwell;
-       u16 fragmented_dwell;
-       struct {
-               u8 iterations;
-               u8 full_scan_mul; /* not used for UMAC */
-       } schedule[2];
+       u8 iterations[2];
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -156,76 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                *global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
-                                   struct ieee80211_vif *vif,
-                                   struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+       return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum iwl_mvm_scan_type
+iwl_mvm_get_scan_type(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                      struct iwl_mvm_scan_params *params)
 {
        int global_cnt = 0;
-       u8 frag_passive_dwell = 0;
+       enum iwl_mvm_traffic_load load;
+       bool low_latency;
 
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_cnt);
        if (!global_cnt)
-               goto not_bound;
-
-       params->suspend_time = 30;
-       params->max_out_time = 120;
-
-       if (iwl_mvm_low_latency(mvm)) {
-               if (fw_has_api(&mvm->fw->ucode_capa,
-                              IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-
-                       params->suspend_time = 105;
-                       /*
-                        * If there is more than one active interface make
-                        * passive scan more fragmented.
-                        */
-                       frag_passive_dwell = 40;
-                       params->max_out_time = frag_passive_dwell;
-               } else {
-                       params->suspend_time = 120;
-                       params->max_out_time = 120;
-               }
-       }
-
-       if (frag_passive_dwell &&
-           fw_has_api(&mvm->fw->ucode_capa,
-                      IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-               /*
-                * P2P device scan should not be fragmented to avoid negative
-                * impact on P2P device discovery. Configure max_out_time to be
-                * equal to dwell time on passive channel.
-                */
-               if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       params->max_out_time = 120;
-               } else {
-                       params->passive_fragmented = true;
-               }
-       }
-
-       if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-           (params->max_out_time > 200))
-               params->max_out_time = 200;
+               return IWL_SCAN_TYPE_UNASSOC;
 
-not_bound:
+       load = iwl_mvm_get_traffic_load(mvm);
+       low_latency = iwl_mvm_low_latency(mvm);
 
-       if (params->passive_fragmented)
-               params->fragmented_dwell = frag_passive_dwell;
+       if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+           vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+           fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+               return IWL_SCAN_TYPE_FRAGMENTED;
 
-       /*
-        * use only basic dwell time in scan command, regardless of the band or
-        * the number of the probes. FW will calculate the actual dwell time.
-        */
-       params->passive_dwell = 110;
-       params->active_dwell = 10;
+       if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+               return IWL_SCAN_TYPE_MILD;
 
-
-       IWL_DEBUG_SCAN(mvm,
-                      "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
-                      params->max_out_time, params->suspend_time,
-                      params->passive_fragmented);
+       return IWL_SCAN_TYPE_WILD;
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -345,6 +352,11 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
                IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
                               aborted ? "aborted" : "completed",
                               iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+               IWL_DEBUG_SCAN(mvm,
+                              "Last line %d, Last iteration %d, Time after last iteration %d\n",
+                              scan_notif->last_schedule_line,
+                              scan_notif->last_schedule_iteration,
+                              __le32_to_cpu(scan_notif->time_after_last_iter));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
        } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -356,9 +368,14 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
                WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 
-               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
                               aborted ? "aborted" : "completed",
                               iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+               IWL_DEBUG_SCAN(mvm,
+                              "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+                              scan_notif->last_schedule_line,
+                              scan_notif->last_schedule_iteration,
+                              __le32_to_cpu(scan_notif->time_after_last_iter));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
                ieee80211_sched_scan_stopped(mvm->hw);
@@ -699,12 +716,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_lmac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->active_dwell;
-       cmd->passive_dwell = params->passive_dwell;
-       if (params->passive_fragmented)
-               cmd->fragmented_dwell = params->fragmented_dwell;
-       cmd->max_out_time = cpu_to_le32(params->max_out_time);
-       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->active_dwell = scan_timing[params->type].dwell_active;
+       cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
        cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
@@ -741,7 +757,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 {
-       return params->schedule[0].iterations + params->schedule[1].iterations;
+       return params->iterations[0] + params->iterations[1];
 }
 
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -755,7 +771,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-       if (params->passive_fragmented)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
@@ -808,11 +824,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ssid_bitmap <<= 1;
 
        cmd->schedule[0].delay = cpu_to_le16(params->interval);
-       cmd->schedule[0].iterations = params->schedule[0].iterations;
-       cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+       cmd->schedule[0].iterations = params->iterations[0];
+       cmd->schedule[0].full_scan_mul = 1;
        cmd->schedule[1].delay = cpu_to_le16(params->interval);
-       cmd->schedule[1].iterations = params->schedule[1].iterations;
-       cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+       cmd->schedule[1].iterations = params->iterations[1];
+       cmd->schedule[1].full_scan_mul = 1;
 
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
                cmd->channel_opt[0].flags =
@@ -958,16 +974,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->active_dwell;
-       cmd->passive_dwell = params->passive_dwell;
-       if (params->passive_fragmented)
-               cmd->fragmented_dwell = params->fragmented_dwell;
-       cmd->max_out_time = cpu_to_le32(params->max_out_time);
-       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->active_dwell = scan_timing[params->type].dwell_active;
+       cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
        cmd->scan_priority =
                iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-       if (iwl_mvm_scan_total_iterations(params) == 0)
+       if (iwl_mvm_scan_total_iterations(params) == 1)
                cmd->ooc_priority =
                        iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
        else
@@ -1003,7 +1018,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-       if (params->passive_fragmented)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1172,12 +1187,10 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        params.n_match_sets = 0;
        params.match_sets = NULL;
 
-       params.schedule[0].iterations = 1;
-       params.schedule[0].full_scan_mul = 0;
-       params.schedule[1].iterations = 0;
-       params.schedule[1].full_scan_mul = 0;
+       params.iterations[0] = 1;
+       params.iterations[1] = 0;
 
-       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+       params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
@@ -1255,10 +1268,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        params.n_match_sets = req->n_match_sets;
        params.match_sets = req->match_sets;
 
-       params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-       params.schedule[0].full_scan_mul = 1;
-       params.schedule[1].iterations = 0xff;
-       params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+       params.iterations[0] = 0;
+       params.iterations[1] = 0xff;
+
+       params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
        if (req->interval > U16_MAX) {
                IWL_DEBUG_SCAN(mvm,
@@ -1281,8 +1294,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                params.delay = req->delay;
        }
 
-       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
        ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
        if (ret)
                return ret;
@@ -1336,13 +1347,16 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        }
 
        mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
        IWL_DEBUG_SCAN(mvm,
                       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
                       uid, mvm->scan_uid_status[uid],
                       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
                       iwl_mvm_ebs_status_str(notif->ebs_status));
+       IWL_DEBUG_SCAN(mvm,
+                      "Last line %d, Last iteration %d, Time from last iteration %d\n",
+                      notif->last_schedule, notif->last_iter,
+                      __le32_to_cpu(notif->time_from_last_iter));
 
        if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
            notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
index 2531aa3d6754a318df3045ceed55c521d99da4c1..df216cd0c98f4659d1c00d7d07c8adb31c8244c6 100644 (file)
@@ -1290,8 +1290,6 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                const u8 *pn;
 
                memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
-               ieee80211_aes_cmac_calculate_k1_k2(keyconf,
-                                                  igtk_cmd.K1, igtk_cmd.K2);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
                igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
index d060e12c3239b06abc2d9615f4bcfccaf67a2cec..380972f8fb8281647b3c278d8b7b51dec306a008 100644 (file)
@@ -194,7 +194,7 @@ int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
                                  struct ieee80211_vif *vif)
 {
        struct iwl_host_cmd cmd = {
-               .id = TOF_CMD,
+               .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
                .len = { sizeof(mvm->tof_data.range_req), },
                /* no copy because of the command size */
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
index 15bf36ad3809d363d58290392e2bbcbcbaa540f7..6df5aada4f161d6b533dae2b07e01146b53798ef 100644 (file)
@@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-                       tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+               else if (ieee80211_is_action(fc))
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
                else
-                       tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 
                /* The spec allows Action frames in A-MPDU, we don't support
                 * it
                 */
                WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
        } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
-               tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+               tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
        } else {
-               tx_cmd->pm_frame_timeout = 0;
+               tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
        }
 
        if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
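
The PM_FRAME_* constants substitute one-for-one for the magic timeouts 3, 2 and 0 that this hunk removes, so their definitions (added elsewhere in this series, not shown here) presumably boil down to:

	#define PM_FRAME_NONE	0	/* no PM frame timeout */
	#define PM_FRAME_MGMT	2	/* generic management frames */
	#define PM_FRAME_ASSOC	3	/* (re)association requests */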
index 4f872f05d988f48fa5e5f6c66d650bdc0fdf47ca..feb2f7e8113464113f330c3141a03f05ea4bef33 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
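
Spelling out the arithmetic behind RX_POOL_SIZE: a request is posted once 2 RBDs are handed over, but the allocator fills 8 at a time, so the pool must bridge the 6-RBD gap per queue. A compile-time check makes this explicit (a sketch; the driver itself does not carry this assert):

	/* (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES
	 * = (8 - 2) * 1 = 6 RBDs that can be in flight per allocation request.
	 */
	_Static_assert(RX_POOL_SIZE == 6, "pool must cover 6 in-flight RBDs per queue");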
+
 struct iwl_host_cmd;
 
 /* This file includes the declarations that are internal to the
@@ -77,29 +92,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
+       u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
@@ -107,6 +122,32 @@ struct iwl_rxq {
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of the allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *     the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *     of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+       atomic_t req_pending;
+       atomic_t req_ready;
+       struct list_head rbd_allocated;
+       struct list_head rbd_empty;
+       spinlock_t lock;
+       struct workqueue_struct *alloc_wq;
+       struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +291,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -275,7 +316,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
-       struct work_struct rx_replenish;
+       struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
index e1af0fffedd818b0e16a6e72fbce9f9445ca8bce..e06591f625c4a9633aedbece40bf9d74bd39d8f7 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When another 6 RBDs are used - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If the claim fails - it is retried on
+ *   subsequent passes until the buffers are ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool of num_queues*(8-2) RBDs - the
+ *   maximum number of RBDs missing per allocation request (a request is
+ *   posted with 2 empty RBDs; there is no guarantee when the other 6 RBDs
+ *   are supplied). The queues supply the recycling of the remaining RBDs.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
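
The 2-post/8-claim protocol sketched above can be modeled in isolation. The following stand-alone user-space C program (not driver code; the counters merely mirror struct iwl_rxq and struct iwl_rb_allocator) walks one batch of stolen pages through the counters:

	#include <stdio.h>

	#define RX_POST_REQ_ALLOC  2
	#define RX_CLAIM_REQ_ALLOC 8

	int main(void)
	{
		int used_count = 0;	/* allocator-owned RBDs, as in struct iwl_rxq */
		int req_pending = 0;	/* as in struct iwl_rb_allocator */

		for (int stolen = 1; stolen <= RX_CLAIM_REQ_ALLOC; stolen++) {
			used_count++;
			/* same condition as iwl_pcie_rx_reuse_rbd() */
			if (used_count % RX_CLAIM_REQ_ALLOC == RX_POST_REQ_ALLOC) {
				req_pending++;
				printf("page %d stolen: post request #%d (pool lends 6 RBDs)\n",
				       stolen, req_pending);
			}
		}
		/* same threshold as the claim path in iwl_pcie_rx_handle() */
		if (used_count >= RX_CLAIM_REQ_ALLOC) {
			used_count -= RX_CLAIM_REQ_ALLOC;
			printf("claimed 8 pre-allocated RBDs, used_count back to %d\n",
			       used_count);
		}
		return 0;
	}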
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               schedule_work(&trans_pcie->rx_replenish);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
@@ -254,6 +277,45 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        }
 }
 
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+                                          gfp_t priority)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct page *page;
+       gfp_t gfp_mask = priority;
+
+       if (rxq->free_count > RX_LOW_WATERMARK)
+               gfp_mask |= __GFP_NOWARN;
+
+       if (trans_pcie->rx_page_order > 0)
+               gfp_mask |= __GFP_COMP;
+
+       /* Alloc a new receive buffer */
+       page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+       if (!page) {
+               if (net_ratelimit())
+                       IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+                                      trans_pcie->rx_page_order);
+               /* Issue an error if the hardware has consumed more than half
+                * of its free buffer list and we don't have enough
+                * pre-allocated buffers.
+                */
+               if (rxq->free_count <= RX_LOW_WATERMARK &&
+                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+                   net_ratelimit())
+                       IWL_CRIT(trans,
+                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+                                rxq->free_count);
+               return NULL;
+       }
+       return page;
+}
+
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
@@ -269,7 +331,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
-       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock(&rxq->lock);
@@ -279,32 +340,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
                }
                spin_unlock(&rxq->lock);
 
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (trans_pcie->rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
                /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-                                          "order: %d\n",
-                                          trans_pcie->rx_page_order);
-
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(trans, "Failed to alloc_pages with %s."
-                                        "Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?
-                                        "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
+               page = iwl_pcie_rx_alloc_page(trans, priority);
+               if (!page)
                        return;
-               }
 
                spin_lock(&rxq->lock);
 
@@ -355,7 +394,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
        lockdep_assert_held(&rxq->lock);
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+       for (i = 0; i < RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +411,164 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free a page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-       iwl_pcie_rxq_alloc_rbs(trans, gfp);
+       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
        iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       struct list_head local_empty;
+       int pending = atomic_xchg(&rba->req_pending, 0);
+
+       IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+       /* If we were scheduled - there is at least one request */
+       spin_lock(&rba->lock);
+       /* swap out the rba->rbd_empty to a local list */
+       list_replace_init(&rba->rbd_empty, &local_empty);
+       spin_unlock(&rba->lock);
+
+       while (pending) {
+               int i;
+               struct list_head local_allocated;
+
+               INIT_LIST_HEAD(&local_allocated);
+
+               for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+                       struct iwl_rx_mem_buffer *rxb;
+                       struct page *page;
+
+                       /* List should never be empty - each reused RBD is
+                        * returned to the list, and initial pool covers any
+                        * possible gap between the time the page is allocated
+                        * and the time the RBD is added.
+                        */
+                       BUG_ON(list_empty(&local_empty));
+                       /* Get the first rxb from the rbd list */
+                       rxb = list_first_entry(&local_empty,
+                                              struct iwl_rx_mem_buffer, list);
+                       BUG_ON(rxb->page);
+
+                       /* Alloc a new receive buffer */
+                       page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+                       if (!page)
+                               continue;
+                       rxb->page = page;
+
+                       /* Get physical address of the RB */
+                       rxb->page_dma = dma_map_page(trans->dev, page, 0,
+                                       PAGE_SIZE << trans_pcie->rx_page_order,
+                                       DMA_FROM_DEVICE);
+                       if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                               rxb->page = NULL;
+                               __free_pages(page, trans_pcie->rx_page_order);
+                               continue;
+                       }
+                       /* dma address must be no more than 36 bits */
+                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+                       /* and also 256 byte aligned! */
+                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+                       /* move the allocated entry to the out list */
+                       list_move(&rxb->list, &local_allocated);
+                       i++;
+               }
+
+               pending--;
+               if (!pending) {
+                       pending = atomic_xchg(&rba->req_pending, 0);
+                       IWL_DEBUG_RX(trans,
+                                    "Pending allocation requests = %d\n",
+                                    pending);
+               }
+
+               spin_lock(&rba->lock);
+               /* add the allocated rbds to the allocator allocated list */
+               list_splice_tail(&local_allocated, &rba->rbd_allocated);
+               /* get more empty RBDs for current pending requests */
+               list_splice_tail_init(&rba->rbd_empty, &local_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_inc(&rba->req_ready);
+       }
+
+       spin_lock(&rba->lock);
+       /* return unused rbds to the allocator empty list */
+       list_splice_tail(&local_empty, &rba->rbd_empty);
+       spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * freed 8 RBDs, in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+                                    struct iwl_rx_mem_buffer
+                                    *out[RX_CLAIM_REQ_ALLOC])
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       /*
+        * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+        * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+        * function will return -ENOMEM, as there are no ready requests.
+        * atomic_dec_if_positive will perform the *actual* decrement only if
+        * req_ready > 0, i.e. - there are ready requests and the function
+        * hands one request to the caller.
+        */
+       if (atomic_dec_if_positive(&rba->req_ready) < 0)
+               return -ENOMEM;
+
+       spin_lock(&rba->lock);
+       for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+               /* Get next free Rx buffer, remove it from free list */
+               out[i] = list_first_entry(&rba->rbd_allocated,
+                              struct iwl_rx_mem_buffer, list);
+               list_del(&out[i]->list);
+       }
+       spin_unlock(&rba->lock);
+
+       return 0;
+}
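
atomic_dec_if_positive() is a stock kernel primitive; for readers unfamiliar with it, its contract restated as a plain (non-atomic) C equivalent:

	/* Illustration only - the real helper does this atomically. */
	static int dec_if_positive(int *v)
	{
		int dec = *v - 1;

		if (dec >= 0)
			*v = dec;	/* decrement only when the result stays >= 0 */
		return dec;
	}

With *v == 0 this returns -1 and leaves *v untouched, which is why iwl_pcie_rx_allocator_get() maps a negative return value to -ENOMEM.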
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
+       struct iwl_rb_allocator *rba_p =
+               container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
-           container_of(data, struct iwl_trans_pcie, rx_replenish);
+               container_of(rba_p, struct iwl_trans_pcie, rba);
 
-       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+       iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
 
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
        spin_lock_init(&rxq->lock);
+       spin_lock_init(&rba->lock);
 
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@ -487,15 +658,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
+       rxq->used_count = 0;
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       INIT_LIST_HEAD(&rba->rbd_allocated);
+       INIT_LIST_HEAD(&rba->rbd_empty);
+
+       for (i = 0; i < RX_POOL_SIZE; i++)
+               list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       for (i = 0; i < RX_POOL_SIZE; i++) {
+               if (!rba->pool[i].page)
+                       continue;
+               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+                              PAGE_SIZE << trans_pcie->rx_page_order,
+                              DMA_FROM_DEVICE);
+               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+               rba->pool[i].page = NULL;
+       }
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
 
        if (!rxq->bd) {
@@ -503,11 +708,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
                if (err)
                        return err;
        }
+       if (!rba->alloc_wq)
+               rba->alloc_wq = alloc_workqueue("rb_allocator",
+                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
+       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+       spin_lock(&rba->lock);
+       atomic_set(&rba->req_pending, 0);
+       atomic_set(&rba->req_ready, 0);
+       /* free all first - we might be reconfigured for a different size */
+       iwl_pcie_rx_free_rba(trans);
+       iwl_pcie_rx_init_rba(rba);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
 
-       INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +737,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+       iwl_pcie_rx_replenish(trans);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +752,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
@@ -545,7 +761,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                return;
        }
 
-       cancel_work_sync(&trans_pcie->rx_replenish);
+       cancel_work_sync(&rba->rx_alloc);
+       if (rba->alloc_wq) {
+               destroy_workqueue(rba->alloc_wq);
+               rba->alloc_wq = NULL;
+       }
+
+       spin_lock(&rba->lock);
+       iwl_pcie_rx_free_rba(trans);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
@@ -566,8 +790,49 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+                                 struct iwl_rx_mem_buffer *rxb,
+                                 struct iwl_rxq *rxq, bool emergency)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+       /* Move the RBD to the used list; it will be moved to the allocator
+        * in batches before claiming or posting a request.
+        */
+       list_add_tail(&rxb->list, &rxq->rx_used);
+
+       if (unlikely(emergency))
+               return;
+
+       /* Count the allocator owned RBDs */
+       rxq->used_count++;
+
+       /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+        * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
+        * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
+        * buffers earlier but still need to post another request.
+        */
+       if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+               /* Move the 2 RBDs to the allocator's ownership.
+                * The allocator has another 6 from its pool to complete
+                * the request.
+                */
+               spin_lock(&rba->lock);
+               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_inc(&rba->req_pending);
+               queue_work(rba->alloc_wq, &rba->rx_alloc);
+       }
+}
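
The modulo in the posting condition matters once a claim attempt has failed: used_count can then grow past RX_CLAIM_REQ_ALLOC and the driver must still post a request for the next batch. A worked trace of that scenario:

	/* used_count: 1  2  3 ... 8   (claim fails)   9  10 ...
	 *                ^ 2 % 8 == 2, post             ^ 10 % 8 == 2, post again
	 * A plain (used_count == RX_POST_REQ_ALLOC) test would miss the second
	 * request and starve the allocator.
	 */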
+
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
-                               struct iwl_rx_mem_buffer *rxb)
+                               struct iwl_rx_mem_buffer *rxb,
+                               bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -633,7 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               iwl_op_mode_rx(trans->op_mode, &rxcb);
+               iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
 
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
@@ -682,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       list_add_tail(&rxb->list, &rxq->rx_used);
+                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               list_add_tail(&rxb->list, &rxq->rx_used);
+               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
 /*
@@ -698,10 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty;
+       u32 r, i, j, count = 0;
+       bool emergency = false;
 
 restart:
        spin_lock(&rxq->lock);
@@ -714,47 +977,95 @@ restart:
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-       /* calculate total frames need to be restock after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
-
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
+               if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+                       emergency = true;
+
                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;
 
                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
-               iwl_pcie_rx_handle_rb(trans, rxb);
+               iwl_pcie_rx_handle_rb(trans, rxb, emergency);
 
                i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode wont assert. */
-               if (fill_rx) {
+
+               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+                * try to claim the pre-allocated buffers from the allocator */
+               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+                       if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+                           !emergency) {
+                               /* Add the remaining 6 empty RBDs
+                                * for allocator use
+                                */
+                               spin_lock(&rba->lock);
+                               list_splice_tail_init(&rxq->rx_used,
+                                                     &rba->rbd_empty);
+                               spin_unlock(&rba->lock);
+                       }
+
+                       /* If not ready - continue, will try to reclaim later.
+                        * No need to reschedule work - the allocator exits
+                        * only on success.
+                        */
+                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
+                               /* If success - then RX_CLAIM_REQ_ALLOC
+                                * buffers were retrieved and should be added
+                                * to free list */
+                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+                                       list_add_tail(&out[j]->list,
+                                                     &rxq->rx_free);
+                                       rxq->free_count++;
+                               }
+                       }
+               }
+               if (emergency) {
                        count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               spin_unlock(&rxq->lock);
-                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+                       if (count == 8) {
                                count = 0;
-                               goto restart;
+                               if (rxq->used_count < RX_QUEUE_SIZE / 3)
+                                       emergency = false;
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+                               spin_lock(&rxq->lock);
                        }
                }
+               /* handle restock for three cases, can be all of them at once:
+                * - we just pulled buffers from the allocator
+                * - we have 8+ unstolen pages accumulated
+                * - we are in emergency and allocated buffers
+                */
+               if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+                       rxq->read = i;
+                       spin_unlock(&rxq->lock);
+                       iwl_pcie_rxq_restock(trans);
+                       goto restart;
+               }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
-       if (fill_rx)
-               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-       else
-               iwl_pcie_rxq_restock(trans);
+       /*
+        * Handle the case where, in emergency mode, some RBDs were left
+        * unallocated. Those RBDs are in the used list, but are not tracked
+        * by the queue's used_count, which only counts allocator-owned RBDs.
+        * Unallocated emergency RBDs must be allocated on exit; otherwise,
+        * when called again, the function may no longer be in emergency mode
+        * and would hand them to the allocator with no tracking in the RBD
+        * allocator counters, which would lead to them never being claimed
+        * back by the queue.
+        * By allocating them here, they are now in the queue free list and
+        * will be restocked by the next call of iwl_pcie_rxq_restock.
+        */
+       if (unlikely(emergency && count))
+               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
 
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
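
To summarize the emergency handling wired in above (all thresholds taken from this hunk):

	/* enter emergency:  rxq->used_count == RX_QUEUE_SIZE / 2
	 *   (half of the queue's RBDs have no pages attached)
	 * while in emergency: every 8 handled RBs, allocate pages directly
	 *   with GFP_ATOMIC instead of waiting for the background allocator
	 * leave emergency:  rxq->used_count drops below RX_QUEUE_SIZE / 3
	 */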
index 0549c91ad3729fdedcd7da944d796674a6cec167..6ba7d300b08f35e1ce05fdf8b3fcab058819f150 100644 (file)
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
-               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+                       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+                       mdelay(1);
+                       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               }
                mdelay(5);
        }
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
        if (ret >= 0)
                return 0;
 
+       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
+       msleep(1);
+
        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
                do {
                        ret = iwl_pcie_set_hw_ready(trans);
-                       if (ret >= 0)
-                               return 0;
+                       if (ret >= 0) {
+                               ret = 0;
+                               goto out;
+                       }
 
                        usleep_range(200, 1000);
                        t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
        IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
        return ret;
 }
 
@@ -764,8 +780,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;
 
+               /*
+                * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
+                * CPU1 sections from the CPU2 sections.
+                * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+                * non-paged sections from the CPU2 paging sections.
+                */
                if (!image->sec[i].data ||
-                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+                   image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
@@ -813,8 +836,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;
 
+               /*
+                * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
+                * CPU1 sections from the CPU2 sections.
+                * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+                * non-paged sections from the CPU2 paging sections.
+                */
                if (!image->sec[i].data ||
-                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+                   image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
@@ -1430,11 +1460,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
         * As this function may be called again in some corner cases don't
         * do anything if NAPI was already initialized.
         */
-       if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+       if (!trans_pcie->napi.poll) {
                init_dummy_netdev(&trans_pcie->napi_dev);
-               iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
-                                    &trans_pcie->napi_dev,
-                                    iwl_pcie_dummy_napi_poll, 64);
+               netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+                              iwl_pcie_dummy_napi_poll, 64);
        }
 }
 
@@ -2261,6 +2290,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
        return prph_len;
 }
 
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+                                  struct iwl_fw_error_dump_data **data,
+                                  int allocated_rb_nums)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       u32 i, r, j, rb_len = 0;
+
+       spin_lock(&rxq->lock);
+
+       r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+       for (i = rxq->read, j = 0;
+            i != r && j < allocated_rb_nums;
+            i = (i + 1) & RX_QUEUE_MASK, j++) {
+               struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+               struct iwl_fw_error_dump_rb *rb;
+
+               dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+                              DMA_FROM_DEVICE);
+
+               rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+               (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+               (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+               rb = (void *)(*data)->data;
+               rb->index = cpu_to_le32(i);
+               memcpy(rb->data, page_address(rxb->page), max_len);
+               /* remap the page so the RB can be reused or freed later */
+               rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+                                                    max_len,
+                                                    DMA_FROM_DEVICE);
+
+               *data = iwl_fw_error_next_data(*data);
+       }
+
+       spin_unlock(&rxq->lock);
+
+       return rb_len;
+}
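
The unmap/memcpy/remap sequence above gives the CPU a coherent view of each RB before copying it into the dump. Since the buffer keeps the same pages, a sync pair would be the lighter-weight alternative, assuming (as appears to hold here, with the firmware in error state) that the mapping can stay live:

	dma_sync_single_for_cpu(trans->dev, rxb->page_dma, max_len,
				DMA_FROM_DEVICE);
	memcpy(rb->data, page_address(rxb->page), max_len);
	dma_sync_single_for_device(trans->dev, rxb->page_dma, max_len,
				   DMA_FROM_DEVICE);

The full unmap/map also means the remap could in principle fail; the sync variant avoids that edge case.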
 #define IWL_CSR_TO_DUMP (0x250)
 
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2330,17 +2400,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
        return monitor_len;
 }
 
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+                           struct iwl_fw_error_dump_data **data,
+                           u32 monitor_len)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 len = 0;
+
+       if ((trans_pcie->fw_mon_page &&
+            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+           trans->dbg_dest_tlv) {
+               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+               u32 base, write_ptr, wrap_cnt;
+
+               /* If there was a dest TLV - use the values from there */
+               if (trans->dbg_dest_tlv) {
+                       write_ptr =
+                               le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+                       wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+                       base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+               } else {
+                       base = MON_BUFF_BASE_ADDR;
+                       write_ptr = MON_BUFF_WRPTR;
+                       wrap_cnt = MON_BUFF_CYCLE_CNT;
+               }
+
+               (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+               fw_mon_data = (void *)(*data)->data;
+               fw_mon_data->fw_mon_wr_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, write_ptr));
+               fw_mon_data->fw_mon_cycle_cnt =
+                       cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+               fw_mon_data->fw_mon_base_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, base));
+
+               len += sizeof(**data) + sizeof(*fw_mon_data);
+               if (trans_pcie->fw_mon_page) {
+                       /*
+                        * The firmware is now asserted, it won't write anything
+                        * to the buffer. CPU can take ownership to fetch the
+                        * data. The buffer will be handed back to the device
+                        * before the firmware will be restarted.
+                        */
+                       dma_sync_single_for_cpu(trans->dev,
+                                               trans_pcie->fw_mon_phys,
+                                               trans_pcie->fw_mon_size,
+                                               DMA_FROM_DEVICE);
+                       memcpy(fw_mon_data->data,
+                              page_address(trans_pcie->fw_mon_page),
+                              trans_pcie->fw_mon_size);
+
+                       monitor_len = trans_pcie->fw_mon_size;
+               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+                       /*
+                        * Update pointers to reflect actual values after
+                        * shifting
+                        */
+                       base = iwl_read_prph(trans, base) <<
+                              trans->dbg_dest_tlv->base_shift;
+                       iwl_trans_read_mem(trans, base, fw_mon_data->data,
+                                          monitor_len / sizeof(u32));
+               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+                       monitor_len =
+                               iwl_trans_pci_dump_marbh_monitor(trans,
+                                                                fw_mon_data,
+                                                                monitor_len);
+               } else {
+                       /* Didn't match anything - output no monitor data */
+                       monitor_len = 0;
+               }
+
+               len += monitor_len;
+               (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+       }
+
+       return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+                         struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_fw_error_dump_data *data;
        struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_fw_error_dump_txcmd *txcmd;
        struct iwl_trans_dump_data *dump_data;
-       u32 len;
+       u32 len, num_rbs;
        u32 monitor_len;
        int i, ptr;
+       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
 
        /* transport dump header */
        len = sizeof(*dump_data);
@@ -2349,22 +2499,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
        len += sizeof(*data) +
                cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
-       /* CSR registers */
-       len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
-       /* PRPH registers */
-       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
-               /* The range includes both boundaries */
-               int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
-                       iwl_prph_dump_addr[i].start + 4;
-
-               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
-                       num_bytes_in_chunk;
-       }
-
-       /* FH registers */
-       len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
        /* FW monitor */
        if (trans_pcie->fw_mon_page) {
                len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2392,6 +2526,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
                monitor_len = 0;
        }
 
+       if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+               dump_data = vzalloc(len);
+               if (!dump_data)
+                       return NULL;
+
+               data = (void *)dump_data->data;
+               len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+               dump_data->len = len;
+
+               return dump_data;
+       }
+
+       /* CSR registers */
+       len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+       /* PRPH registers */
+       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+               /* The range includes both boundaries */
+               int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+                       iwl_prph_dump_addr[i].start + 4;
+
+               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+                      num_bytes_in_chunk;
+       }
+
+       /* FH registers */
+       len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+       if (dump_rbs) {
+               /* RBs */
+               num_rbs = le16_to_cpu(ACCESS_ONCE(
+                                     trans_pcie->rxq.rb_stts->closed_rb_num))
+                                     & 0x0FFF;
+               num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+               len += num_rbs * (sizeof(*data) +
+                                 sizeof(struct iwl_fw_error_dump_rb) +
+                                 (PAGE_SIZE << trans_pcie->rx_page_order));
+       }
+
        dump_data = vzalloc(len);
        if (!dump_data)
                return NULL;
@@ -2428,74 +2601,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
        len += iwl_trans_pcie_dump_prph(trans, &data);
        len += iwl_trans_pcie_dump_csr(trans, &data);
        len += iwl_trans_pcie_fh_regs_dump(trans, &data);
-       /* data is already pointing to the next section */
-
-       if ((trans_pcie->fw_mon_page &&
-            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
-           trans->dbg_dest_tlv) {
-               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-               u32 base, write_ptr, wrap_cnt;
-
-               /* If there was a dest TLV - use the values from there */
-               if (trans->dbg_dest_tlv) {
-                       write_ptr =
-                               le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
-                       wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
-                       base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-               } else {
-                       base = MON_BUFF_BASE_ADDR;
-                       write_ptr = MON_BUFF_WRPTR;
-                       wrap_cnt = MON_BUFF_CYCLE_CNT;
-               }
-
-               data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
-               fw_mon_data = (void *)data->data;
-               fw_mon_data->fw_mon_wr_ptr =
-                       cpu_to_le32(iwl_read_prph(trans, write_ptr));
-               fw_mon_data->fw_mon_cycle_cnt =
-                       cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
-               fw_mon_data->fw_mon_base_ptr =
-                       cpu_to_le32(iwl_read_prph(trans, base));
-
-               len += sizeof(*data) + sizeof(*fw_mon_data);
-               if (trans_pcie->fw_mon_page) {
-                       /*
-                        * The firmware is now asserted, it won't write anything
-                        * to the buffer. CPU can take ownership to fetch the
-                        * data. The buffer will be handed back to the device
-                        * before the firmware will be restarted.
-                        */
-                       dma_sync_single_for_cpu(trans->dev,
-                                               trans_pcie->fw_mon_phys,
-                                               trans_pcie->fw_mon_size,
-                                               DMA_FROM_DEVICE);
-                       memcpy(fw_mon_data->data,
-                              page_address(trans_pcie->fw_mon_page),
-                              trans_pcie->fw_mon_size);
-
-                       monitor_len = trans_pcie->fw_mon_size;
-               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
-                       /*
-                        * Update pointers to reflect actual values after
-                        * shifting
-                        */
-                       base = iwl_read_prph(trans, base) <<
-                              trans->dbg_dest_tlv->base_shift;
-                       iwl_trans_read_mem(trans, base, fw_mon_data->data,
-                                          monitor_len / sizeof(u32));
-               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
-                       monitor_len =
-                               iwl_trans_pci_dump_marbh_monitor(trans,
-                                                                fw_mon_data,
-                                                                monitor_len);
-               } else {
-                       /* Didn't match anything - output no monitor data */
-                       monitor_len = 0;
-               }
+       if (dump_rbs)
+               len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
-               len += monitor_len;
-               data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
-       }
+       len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
        dump_data->len = len;
 
@@ -2558,6 +2667,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        if (!trans)
                return ERR_PTR(-ENOMEM);
 
+       trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        trans_pcie->trans = trans;
index 601eee1ad60b3c9771724678ae17e0a8a6a54f8a..a8c8a4a7420b53d02a798bcc261f408f77440ace 100644 (file)
@@ -388,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
        /* first TB is never freed - it's the scratchbuf data */
 
-       for (i = 1; i < num_tbs; i++)
-               dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-                                iwl_pcie_tfd_tb_get_len(tfd, i),
-                                DMA_TO_DEVICE);
-
+       for (i = 1; i < num_tbs; i++) {
+               if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+                       dma_unmap_page(trans->dev,
+                                      iwl_pcie_tfd_tb_get_addr(tfd, i),
+                                      iwl_pcie_tfd_tb_get_len(tfd, i),
+                                      DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(trans->dev,
+                                        iwl_pcie_tfd_tb_get_addr(tfd, i),
+                                        iwl_pcie_tfd_tb_get_len(tfd, i),
+                                        DMA_TO_DEVICE);
+       }
        tfd->num_tbs = 0;
 }
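
The replacement loop above has to undo two different kinds of mapping, so each transfer buffer's origin is recorded as one bit in meta->flags when the TFD is built and consulted again at teardown. A reduced userspace sketch of that bookkeeping; the unmap routines are stand-ins, not the DMA API, and the value of CMD_TB_BITMAP_POS is an assumption:

#include <stdio.h>

#define CMD_TB_BITMAP_POS 11 /* assumed bit offset */

static void unmap_page_stub(int i)   { printf("tb %d: unmap_page\n", i); }
static void unmap_single_stub(int i) { printf("tb %d: unmap_single\n", i); }

int main(void)
{
	unsigned int flags = 0;
	int i, num_tbs = 4;

	/* TBs 2 and 3 came from page fragments; record that at build time. */
	flags |= 1u << (2 + CMD_TB_BITMAP_POS);
	flags |= 1u << (3 + CMD_TB_BITMAP_POS);

	/* Teardown dispatches on the recorded bit, as in the hunk. */
	for (i = 1; i < num_tbs; i++) {
		if (flags & (1u << (i + CMD_TB_BITMAP_POS)))
			unmap_page_stub(i);
		else
			unmap_single_stub(i);
	}
	return 0;
}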
 
@@ -468,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 
        iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
-       return 0;
+       return num_tbs;
 }
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -1546,6 +1553,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
        }
 
+       BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+                    sizeof(out_meta->flags) * BITS_PER_BYTE);
        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kzfree(txq->entries[idx].free_buf);
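
The BUILD_BUG_ON added above makes the bitmap scheme fail at compile time rather than at runtime if the per-TB bits ever stop fitting in out_meta->flags. The same check expressed in portable C11; note the inverted condition (BUILD_BUG_ON trips when true, _Static_assert when false), and both constants are assumed values:

#include <limits.h>

#define IWL_NUM_OF_TBS    20 /* assumed */
#define CMD_TB_BITMAP_POS 11 /* assumed */

struct meta { unsigned int flags; };

/* Fails the build, not the run, if the bitmap outgrows the field. */
_Static_assert(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS <=
	       sizeof(((struct meta *)0)->flags) * CHAR_BIT,
	       "TB bitmap must fit in meta->flags");

int main(void)
{
	return 0;
}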
@@ -1789,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_hdr *hdr;
        struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq;
@@ -1798,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        void *tb1_addr;
        u16 len, tb1_len, tb2_len;
        bool wait_write_ptr;
-       __le16 fc = hdr->frame_control;
-       u8 hdr_len = ieee80211_hdrlen(fc);
+       __le16 fc;
+       u8 hdr_len;
        u16 wifi_seq;
+       int i;
 
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
@@ -1809,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;
 
+       if (skb_is_nonlinear(skb) &&
+           skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+           __skb_linearize(skb))
+               return -ENOMEM;
+
+       /* mac80211 always puts the full header into the SKB's head,
+        * so there's no need to check if it's readable there
+        */
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = hdr->frame_control;
+       hdr_len = ieee80211_hdrlen(fc);
+
        spin_lock(&txq->lock);
 
        /* In AGG mode, the index in the ring must correspond to the WiFi
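
The new guard above bounds the scatter list: an skb carrying more page fragments than a TFD can describe is flattened into its linear head first, and the transmit path bails out with -ENOMEM only if that flattening fails. A stand-in sketch of the pattern, assuming an arbitrary fragment limit; linearize() models a successful __skb_linearize():

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAGS 3 /* assumed hardware limit */

struct fake_skb { int nr_frags; };

static bool linearize(struct fake_skb *skb)
{
	skb->nr_frags = 0;   /* all data now lives in the linear head */
	return true;         /* pretend the reallocation succeeded */
}

int main(void)
{
	struct fake_skb skb = { .nr_frags = 5 };

	if (skb.nr_frags > MAX_FRAGS && !linearize(&skb)) {
		fprintf(stderr, "would return -ENOMEM\n");
		return 1;
	}
	printf("frags after guard: %d\n", skb.nr_frags);
	return 0;
}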
@@ -1839,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[q->write_ptr].meta;
+       out_meta->flags = 0;
 
        /*
         * The second TB (tb1) points to the remainder of the TX command
@@ -1872,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /*
         * Set up TFD's third entry to point directly to remainder
-        * of skb, if any (802.11 null frames have no payload).
+        * of skb's head, if any
         */
-       tb2_len = skb->len - hdr_len;
+       tb2_len = skb_headlen(skb) - hdr_len;
        if (tb2_len > 0) {
                dma_addr_t tb2_phys = dma_map_single(trans->dev,
                                                     skb->data + hdr_len,
@@ -1887,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
        }
 
+       /* Set up the remaining TFD entries to point to the fragment data */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               dma_addr_t tb_phys;
+               int tb_idx;
+
+               if (!skb_frag_size(frag))
+                       continue;
+
+               tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+                                          skb_frag_size(frag), DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
+                       goto out_err;
+               }
+               tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+                                               skb_frag_size(frag), false);
+
+               out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+       }
+
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
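
The fragment loop above maps each nonempty page fragment for DMA, records its slot in the flags bitmap, and on a mapping failure unwinds every mapping made so far before returning. A reduced sketch of that map-record-unwind shape; the failing index, handle values, and bit offset are all arbitrary:

#include <stdio.h>

#define BITMAP_POS 11 /* assumed, as in the driver */

static int map_stub(int i)        { return i == 2 ? -1 : i + 100; }
static void unmap_all(unsigned f) { printf("unwind bitmap 0x%x\n", f); }

int main(void)
{
	unsigned int flags = 0;
	int i;

	for (i = 0; i < 4; i++) {
		int handle = map_stub(i);

		if (handle < 0) {
			unmap_all(flags); /* like iwl_pcie_tfd_unmap() */
			return 1;
		}
		flags |= 1u << (i + BITMAP_POS);
	}
	printf("all fragments mapped, bitmap 0x%x\n", flags);
	return 0;
}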
 
@@ -1896,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                             &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
                             skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
-                                 skb->data + hdr_len, tb2_len);
+                                 hdr_len, skb->len - hdr_len);
 
        wait_write_ptr = ieee80211_has_morefrags(fc);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr) {
-               if (txq->wd_timeout)
-                       mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+               if (txq->wd_timeout) {
+                       /*
+                        * If the TXQ is active, arm the timer now; if it is
+                        * frozen, stash the timeout in the remainder so the
+                        * timer is armed with the right value once the
+                        * station wakes up.
+                        */
+                       if (!txq->frozen)
+                               mod_timer(&txq->stuck_timer,
+                                         jiffies + txq->wd_timeout);
+                       else
+                               txq->frozen_expiry_remainder = txq->wd_timeout;
+               }
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
                iwl_trans_pcie_ref(trans);
        }
index 99e873dc86847c80de5a4d27cfdf84c62d83a519..520bef80747f295bea9f98e4ca2573bbc5dde48a 100644 (file)
@@ -2399,6 +2399,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        ieee80211_hw_set(hw, AMPDU_AGGREGATION);
        ieee80211_hw_set(hw, MFP_CAPABLE);
        ieee80211_hw_set(hw, SIGNAL_DBM);
+       ieee80211_hw_set(hw, TDLS_WIDER_BW);
        if (rctbl)
                ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
 
@@ -2676,7 +2677,7 @@ static void hwsim_mon_setup(struct net_device *dev)
        dev->netdev_ops = &hwsim_netdev_ops;
        dev->destructor = free_netdev;
        ether_setup(dev);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
        eth_zero_addr(dev->dev_addr);
        dev->dev_addr[0] = 0x12;
@@ -3120,8 +3121,10 @@ static int hwsim_init_netlink(void)
                goto failure;
 
        rc = netlink_register_notifier(&hwsim_netlink_notifier);
-       if (rc)
+       if (rc) {
+               genl_unregister_family(&hwsim_genl_family);
                goto failure;
+       }
 
        return 0;
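
The hwsim fix above completes an error path: if the notifier registration fails after the generic netlink family was already registered, the family must be unregistered before bailing out, or it would leak. The general undo-on-later-failure shape, with stand-ins for the genl/netlink calls:

#include <stdio.h>

static int register_family(void)    { return 0; }
static void unregister_family(void) { puts("family unregistered"); }
static int register_notifier(void)  { return -1; /* simulated failure */ }

static int init_netlink(void)
{
	int rc = register_family();

	if (rc)
		return rc;

	rc = register_notifier();
	if (rc) {
		unregister_family(); /* undo step 1 on step-2 failure */
		return rc;
	}
	return 0;
}

int main(void)
{
	printf("init_netlink: %d\n", init_netlink());
	return 0;
}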
 
index 77361af68b1868cfcfd67cc71f928ef95a243880..9420fc61c2e6ab09aeba53d01930be1ebc9fd0f3 100644 (file)
@@ -5019,35 +5019,36 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
 
                rcu_read_unlock();
-       }
 
-       if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-           !priv->ap_fw) {
-               rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
-               if (rc)
-                       goto out;
+               if (changed & BSS_CHANGED_ASSOC) {
+                       if (!priv->ap_fw) {
+                               rc = mwl8k_cmd_set_rate(hw, vif,
+                                                       ap_legacy_rates,
+                                                       ap_mcs_rates);
+                               if (rc)
+                                       goto out;
 
-               rc = mwl8k_cmd_use_fixed_rate_sta(hw);
-               if (rc)
-                       goto out;
-       } else {
-               if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-                   priv->ap_fw) {
-                       int idx;
-                       int rate;
+                               rc = mwl8k_cmd_use_fixed_rate_sta(hw);
+                               if (rc)
+                                       goto out;
+                       } else {
+                               int idx;
+                               int rate;
 
-                       /* Use AP firmware specific rate command.
-                        */
-                       idx = ffs(vif->bss_conf.basic_rates);
-                       if (idx)
-                               idx--;
+                               /* Use the AP-firmware-specific rate command.
+                                */
+                               idx = ffs(vif->bss_conf.basic_rates);
+                               if (idx)
+                                       idx--;
 
-                       if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-                               rate = mwl8k_rates_24[idx].hw_value;
-                       else
-                               rate = mwl8k_rates_50[idx].hw_value;
+                               if (hw->conf.chandef.chan->band ==
+                                   IEEE80211_BAND_2GHZ)
+                                       rate = mwl8k_rates_24[idx].hw_value;
+                               else
+                                       rate = mwl8k_rates_50[idx].hw_value;
 
-                       mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+                               mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+                       }
                }
        }
 
index b6cc9ff47fc2e59b3cc92fe4002f05e59d981e56..40d72312f3df21dd6e4e12019febd6363a6f7912 100644 (file)
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                (struct rsi_91x_sdiodev *)adapter->rsi_dev;
        u32 len;
        u32 num_blocks;
+       const u8 *fw;
        const struct firmware *fw_entry = NULL;
        u32 block_size = dev->tx_blk_size;
        int status = 0;
@@ -200,6 +201,12 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
+       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw) {
+               status = -ENOMEM;
+               goto out;
+       }
        len = fw_entry->size;
 
        if (len % 4)
@@ -210,7 +217,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-       status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
+
+out:
        release_firmware(fw_entry);
        return status;
 }
index 1106ce76707e1095fd523c5c541fa3740e9c9496..de4900862836a55019a3033ad7045ac2fcc186d6 100644 (file)
@@ -146,7 +146,12 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
        fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw) {
+               status = -ENOMEM;
+               goto out;
+       }
        len = fw_entry->size;
 
        if (len % 4)
@@ -158,6 +163,9 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
        status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
+
+out:
        release_firmware(fw_entry);
        return status;
 }
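
Both rsi hunks above apply the same fix: the image returned by request_firmware() may sit in memory that cannot be handed to the card's transfer path (and must not be modified in place), so it is duplicated with kmemdup() into an ordinary kernel buffer, copied to the card, and freed. A userspace analogue of that duplicate-then-use pattern; memdup() stands in for kmemdup(..., GFP_KERNEL):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	static const unsigned char fw_entry[] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char *fw = memdup(fw_entry, sizeof(fw_entry));

	if (!fw)
		return 1; /* like returning -ENOMEM */

	printf("copy to card: %zu bytes from a writable buffer\n",
	       sizeof(fw_entry));
	free(fw); /* like kfree(fw) after rsi_copy_to_card() */
	return 0;
}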
index 2b4ef256c6b9432675b2de9bae9b6cd95c6309d2..de62f5dcb62f7971fbc21adcc1212a1ed9ac68aa 100644 (file)
@@ -240,7 +240,6 @@ config RT2X00_LIB_USB
 
 config RT2X00_LIB
        tristate
-       select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
        bool
index afba0739c3b87ded3965521b449408fef35d3d33..78cc035b2d1765576da6b17fe144425803cf7109 100644 (file)
@@ -54,7 +54,7 @@
 #define CSR_REG_BASE                   0x0400
 #define CSR_REG_SIZE                   0x0100
 #define EEPROM_BASE                    0x0000
-#define EEPROM_SIZE                    0x006a
+#define EEPROM_SIZE                    0x006e
 #define BBP_BASE                       0x0000
 #define BBP_SIZE                       0x0060
 #define RF_BASE                                0x0004
index 9bb398bed9bb68ba133d702c2e5be8b0b089a8ae..3282ddb766f4224a8e10e037fa7ea4332dfe0e27 100644 (file)
@@ -254,6 +254,8 @@ struct link_qual {
        int tx_failed;
 };
 
+DECLARE_EWMA(rssi, 1024, 8)
+
 /*
  * Antenna settings about the currently active link.
  */
@@ -285,7 +287,7 @@ struct link_ant {
         * Similar to the avg_rssi in the link_qual structure
         * this value is updated by using the walking average.
         */
-       struct ewma rssi_ant;
+       struct ewma_rssi rssi_ant;
 };
 
 /*
@@ -314,7 +316,7 @@ struct link {
        /*
         * Currently active average RSSI value
         */
-       struct ewma avg_rssi;
+       struct ewma_rssi avg_rssi;
 
        /*
         * Work structure for scheduling periodic link tuning.
index 9b941c0c12648d4a5b8e42c1e0679d9882febce1..017188e5a73628b2c0454fbb1916646da9ab650d 100644 (file)
  */
 #define DEFAULT_RSSI           -128
 
-/* Constants for EWMA calculations. */
-#define RT2X00_EWMA_FACTOR     1024
-#define RT2X00_EWMA_WEIGHT     8
-
-static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma)
 {
        unsigned long avg;
 
-       avg = ewma_read(ewma);
+       avg = ewma_rssi_read(ewma);
        if (avg)
                return -avg;
 
@@ -76,8 +72,7 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
 
 static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
 {
-       ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
-                 RT2X00_EWMA_WEIGHT);
+       ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant);
 }
 
 static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -225,12 +220,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
        /*
         * Update global RSSI
         */
-       ewma_add(&link->avg_rssi, -rxdesc->rssi);
+       ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi);
 
        /*
         * Update antenna RSSI
         */
-       ewma_add(&ant->rssi_ant, -rxdesc->rssi);
+       ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi);
 }
 
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
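
The rt2x00 conversion above moves to the kernel's DECLARE_EWMA()-generated helpers; the declared parameters (1024, 8) are a fixed-point precision factor and an averaging weight, and because the stored average is unsigned the driver feeds in -rssi and negates again on read. A sketch of that arithmetic under those assumptions, with made-up sample values (this approximates, rather than reproduces, the kernel's internal EWMA formula):

#include <stdio.h>

#define FACTOR 1024u
#define WEIGHT 8u

static unsigned long ewma_add(unsigned long avg, unsigned long val)
{
	if (!avg)
		return val * FACTOR; /* first sample seeds the average */
	return (avg * (WEIGHT - 1) + val * FACTOR) / WEIGHT;
}

int main(void)
{
	int samples[] = { -62, -65, -70, -61 };
	unsigned long avg = 0;
	unsigned int i;

	/* Store the negated (positive) RSSI, as rt2x00link_update_stats does. */
	for (i = 0; i < 4; i++)
		avg = ewma_add(avg, (unsigned long)-samples[i]);

	/* Scale back down and restore the sign, like the driver's read side. */
	printf("avg rssi: %ld dBm\n", -(long)(avg / FACTOR));
	return 0;
}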
@@ -285,8 +280,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
         */
        rt2x00dev->link.count = 0;
        memset(qual, 0, sizeof(*qual));
-       ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
-                 RT2X00_EWMA_WEIGHT);
+       ewma_rssi_init(&rt2x00dev->link.avg_rssi);
 
        /*
         * Restore the VGC level as stored in the registers,
index 3b3a88b53b119909112a806ee71ab4d4bfa67a79..585d0883c7e58760eed503de64ced513c8331c82 100644 (file)
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+       struct rtl_tcb_desc tcb_desc;
 
-       if (skb)
-               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+       if (skb) {
+               memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+       }
 }
 
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
index 7cf36619f25005e4395251ba663b2b15cd14e41a..25db369b5d18c4e62a9862015f572d18affdf53d 100644 (file)
@@ -818,26 +818,29 @@ static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
 
 static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
 {
-       u16                     value16;
-
+       u16 value16;
+       u32 value32;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-       mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
-                     RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
-                     RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
-       rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+       value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
+                  RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+                  RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32));
        /* Accept all multicast address */
        rtl_write_dword(rtlpriv,  REG_MAR, 0xFFFFFFFF);
        rtl_write_dword(rtlpriv,  REG_MAR + 4, 0xFFFFFFFF);
        /* Accept all management frames */
        value16 = 0xFFFF;
-       rtl92c_set_mgt_filter(hw, value16);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+                                     (u8 *)(&value16));
        /* Reject all control frame - default value is 0 */
-       rtl92c_set_ctrl_filter(hw, 0x0);
+       value16 = 0x0;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+                                     (u8 *)(&value16));
        /* Accept all data frames */
        value16 = 0xFFFF;
-       rtl92c_set_data_filter(hw, value16);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER,
+                                     (u8 *)(&value16));
 }
 
 static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
@@ -988,17 +991,6 @@ static void _InitPABias(struct ieee80211_hw *hw)
        }
 }
 
-static void _update_mac_setting(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
-       mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
-       mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-       mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-       mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
 int rtl92cu_hw_init(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1068,7 +1060,6 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        }
        _rtl92cu_hw_configure(hw);
        _InitPABias(hw);
-       _update_mac_setting(hw);
        rtl92c_dm_init(hw);
 exit:
        local_irq_restore(flags);
@@ -1620,7 +1611,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        enum wireless_mode wirelessmode = mac->mode;
        u8 idx = 0;
 
@@ -1829,63 +1819,10 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                                u4b_ac_param);
                                break;
                        default:
-                               RT_ASSERT(false,
-                                         "SetHwReg8185(): invalid aci: %d !\n",
+                               RT_ASSERT(false, "invalid aci: %d !\n",
                                          e_aci);
                                break;
                        }
-                       if (rtlusb->acm_method != EACMWAY2_SW)
-                               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                        HW_VAR_ACM_CTRL, &e_aci);
-                       break;
-               }
-       case HW_VAR_ACM_CTRL:{
-                       u8 e_aci = *val;
-                       union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
-                                                       (&(mac->ac[0].aifs));
-                       u8 acm = p_aci_aifsn->f.acm;
-                       u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
-
-                       acm_ctrl =
-                           acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
-                       if (acm) {
-                               switch (e_aci) {
-                               case AC0_BE:
-                                       acm_ctrl |= AcmHw_BeqEn;
-                                       break;
-                               case AC2_VI:
-                                       acm_ctrl |= AcmHw_ViqEn;
-                                       break;
-                               case AC3_VO:
-                                       acm_ctrl |= AcmHw_VoqEn;
-                                       break;
-                               default:
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                                                "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
-                                                acm);
-                                       break;
-                               }
-                       } else {
-                               switch (e_aci) {
-                               case AC0_BE:
-                                       acm_ctrl &= (~AcmHw_BeqEn);
-                                       break;
-                               case AC2_VI:
-                                       acm_ctrl &= (~AcmHw_ViqEn);
-                                       break;
-                               case AC3_VO:
-                                       acm_ctrl &= (~AcmHw_VoqEn);
-                                       break;
-                               default:
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                                "switch case not processed\n");
-                                       break;
-                               }
-                       }
-                       RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
-                                "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
-                                acm_ctrl);
-                       rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
                        break;
                }
        case HW_VAR_RCR:{
@@ -1999,12 +1936,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
        case HW_VAR_MGT_FILTER:
                rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+               mac->rx_mgt_filter = *(u16 *)val;
                break;
        case HW_VAR_CTRL_FILTER:
                rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+               mac->rx_ctrl_filter = *(u16 *)val;
                break;
        case HW_VAR_DATA_FILTER:
                rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+               mac->rx_data_filter = *(u16 *)val;
                break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
index 1c55a002d4bd9d9cc48986483a29cbfd5228b0ab..035713311a4ab3cdd160d079fa7376fd06c6c7ad 100644 (file)
@@ -393,59 +393,9 @@ void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
 void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       u32 u4b_ac_param;
 
        rtl92c_dm_init_edca_turbo(hw);
-       u4b_ac_param = (u32) mac->ac[aci].aifs;
-       u4b_ac_param |=
-           ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
-           AC_PARAM_ECW_MIN_OFFSET;
-       u4b_ac_param |=
-           ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
-           AC_PARAM_ECW_MAX_OFFSET;
-       u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
-                        AC_PARAM_TXOP_OFFSET;
-       RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
-                aci, u4b_ac_param);
-       switch (aci) {
-       case AC1_BK:
-               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
-               break;
-       case AC0_BE:
-               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
-               break;
-       case AC2_VI:
-               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
-               break;
-       case AC3_VO:
-               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
-               break;
-       default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
-               break;
-       }
-}
-
-/*-------------------------------------------------------------------------
- * HW MAC Address
- *-------------------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
-{
-       u32 i;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       for (i = 0 ; i < ETH_ALEN ; i++)
-               rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
-
-       RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
-                "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
-                rtl_read_byte(rtlpriv, REG_MACID),
-                rtl_read_byte(rtlpriv, REG_MACID+1),
-                rtl_read_byte(rtlpriv, REG_MACID+2),
-                rtl_read_byte(rtlpriv, REG_MACID+3),
-                rtl_read_byte(rtlpriv, REG_MACID+4),
-                rtl_read_byte(rtlpriv, REG_MACID+5));
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
 }
 
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
@@ -644,47 +594,6 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
        rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
 }
 
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-}
-
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
-}
-
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-}
-
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
-}
-
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return rtl_read_word(rtlpriv,  REG_RXFLTMAP2);
-}
-
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
-}
 /*==============================================================*/
 
 static u8 _rtl92c_query_rxpwrpercentage(char antpower)
index e34f0f14ccd775954e56840fdddd1a736dc71aae..553a4bfac66894a2999a61314ea5c6a683aa4854 100644 (file)
@@ -48,7 +48,6 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
 /*---------------------------------------------------------------
  *     Hardware init functions
  *---------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
 void rtl92c_init_interrupt(struct ieee80211_hw *hw);
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
 
@@ -73,15 +72,6 @@ void rtl92c_init_retry_function(struct ieee80211_hw *hw);
 void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
 void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
 
-/* For filter */
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
-
-
 u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
 
 struct rx_fwinfo_92c {
index 23806c243a53174db28aa1b0a99e5a01119206d7..fd4a5353d2169e39ef3f3a12c26aee577778f162 100644 (file)
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
        {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
        {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
index a863a44f9e16a0574b5a455736a9ba02863a9683..018340aedf0993b641a521c59e3b0350f6969085 100644 (file)
@@ -449,7 +449,7 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
                                 "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
                                  rate_section, path, txnum);
                        break;
-               };
+               }
        } else {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                         "Invalid Band %d\n", band);
@@ -489,7 +489,7 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
                                 "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
                                  rate_section, path, txnum);
                        break;
-               };
+               }
        } else {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                         "Invalid Band %d()\n", band);
@@ -853,7 +853,7 @@ static u8 _rtl92ee_get_rate_section_index(u32 regaddr)
                else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
                        index = (u8)((regaddr - 0xE20) / 4);
                break;
-       };
+       }
        return index;
 }
 
index 3ba1e3218ed2957913a424cd06e6cb85038e2066..d091f1d5f91eb2bbd5f399e01187c7de47cf4aab 100644 (file)
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
                   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
index c8a33f43916ef2dffe116229e661985a152ecc47..342a2993ef986c3342c02b34188ff4cc5f0fd5be 100644 (file)
@@ -93,27 +93,26 @@ struct wl18xx_acx_checksum_state {
 
 
 struct wl18xx_acx_error_stats {
-       u32 error_frame;
-       u32 error_null_Frame_tx_start;
-       u32 error_numll_frame_cts_start;
-       u32 error_bar_retry;
-       u32 error_frame_cts_nul_flid;
-} __packed;
-
-struct wl18xx_acx_debug_stats {
-       u32 debug1;
-       u32 debug2;
-       u32 debug3;
-       u32 debug4;
-       u32 debug5;
-       u32 debug6;
-} __packed;
-
-struct wl18xx_acx_ring_stats {
-       u32 prepared_descs;
-       u32 tx_cmplt;
+       u32 error_frame_non_ctrl;
+       u32 error_frame_ctrl;
+       u32 error_frame_during_protection;
+       u32 null_frame_tx_start;
+       u32 null_frame_cts_start;
+       u32 bar_retry;
+       u32 num_frame_cts_nul_flid;
+       u32 tx_abort_failure;
+       u32 tx_resume_failure;
+       u32 rx_cmplt_db_overflow_cnt;
+       u32 elp_while_rx_exch;
+       u32 elp_while_tx_exch;
+       u32 elp_while_tx;
+       u32 elp_while_nvic_pending;
+       u32 rx_excessive_frame_len;
+       u32 burst_mismatch;
+       u32 tbc_exch_mismatch;
 } __packed;
 
+#define NUM_OF_RATES_INDEXES 30
 struct wl18xx_acx_tx_stats {
        u32 tx_prepared_descs;
        u32 tx_cmplt;
@@ -123,7 +122,7 @@ struct wl18xx_acx_tx_stats {
        u32 tx_data_programmed;
        u32 tx_burst_programmed;
        u32 tx_starts;
-       u32 tx_imm_resp;
+       u32 tx_stop;
        u32 tx_start_templates;
        u32 tx_start_int_templates;
        u32 tx_start_fw_gen;
@@ -132,13 +131,14 @@ struct wl18xx_acx_tx_stats {
        u32 tx_exch;
        u32 tx_retry_template;
        u32 tx_retry_data;
+       u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES];
        u32 tx_exch_pending;
        u32 tx_exch_expiry;
        u32 tx_done_template;
        u32 tx_done_data;
        u32 tx_done_int_template;
-       u32 tx_frame_checksum;
-       u32 tx_checksum_result;
+       u32 tx_cfe1;
+       u32 tx_cfe2;
        u32 frag_called;
        u32 frag_mpdu_alloc_failed;
        u32 frag_init_called;
@@ -166,11 +166,8 @@ struct wl18xx_acx_rx_stats {
        u32 rx_cmplt_task;
        u32 rx_phy_hdr;
        u32 rx_timeout;
+       u32 rx_rts_timeout;
        u32 rx_timeout_wa;
-       u32 rx_wa_density_dropped_frame;
-       u32 rx_wa_ba_not_expected;
-       u32 rx_frame_checksum;
-       u32 rx_checksum_result;
        u32 defrag_called;
        u32 defrag_init_called;
        u32 defrag_in_process_called;
@@ -180,6 +177,7 @@ struct wl18xx_acx_rx_stats {
        u32 decrypt_key_not_found;
        u32 defrag_need_decrypt;
        u32 rx_tkip_replays;
+       u32 rx_xfr;
 } __packed;
 
 struct wl18xx_acx_isr_stats {
@@ -194,21 +192,13 @@ struct wl18xx_acx_pwr_stats {
        u32 connection_out_of_sync;
        u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
        u32 rcvd_awake_bcns_cnt;
-} __packed;
-
-struct wl18xx_acx_event_stats {
-       u32 calibration;
-       u32 rx_mismatch;
-       u32 rx_mem_empty;
-} __packed;
-
-struct wl18xx_acx_ps_poll_stats {
-       u32 ps_poll_timeouts;
-       u32 upsd_timeouts;
-       u32 upsd_max_ap_turn;
-       u32 ps_poll_max_ap_turn;
-       u32 ps_poll_utilization;
-       u32 upsd_utilization;
+       u32 sleep_time_count;
+       u32 sleep_time_avg;
+       u32 sleep_cycle_avg;
+       u32 sleep_percent;
+       u32 ap_sleep_active_conf;
+       u32 ap_sleep_user_conf;
+       u32 ap_sleep_counter;
 } __packed;
 
 struct wl18xx_acx_rx_filter_stats {
@@ -228,11 +218,11 @@ struct wl18xx_acx_rx_rate_stats {
 } __packed;
 
 #define AGGR_STATS_TX_AGG      16
-#define AGGR_STATS_TX_RATE     16
 #define AGGR_STATS_RX_SIZE_LEN 16
 
 struct wl18xx_acx_aggr_stats {
-       u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+       u32 tx_agg_rate[AGGR_STATS_TX_AGG];
+       u32 tx_agg_len[AGGR_STATS_TX_AGG];
        u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
 } __packed;
 
@@ -241,8 +231,6 @@ struct wl18xx_acx_aggr_stats {
 struct wl18xx_acx_pipeline_stats {
        u32 hs_tx_stat_fifo_int;
        u32 hs_rx_stat_fifo_int;
-       u32 tcp_tx_stat_fifo_int;
-       u32 tcp_rx_stat_fifo_int;
        u32 enc_tx_stat_fifo_int;
        u32 enc_rx_stat_fifo_int;
        u32 rx_complete_stat_fifo_int;
@@ -250,38 +238,61 @@ struct wl18xx_acx_pipeline_stats {
        u32 post_proc_swi;
        u32 sec_frag_swi;
        u32 pre_to_defrag_swi;
-       u32 defrag_to_csum_swi;
-       u32 csum_to_rx_xfer_swi;
+       u32 defrag_to_rx_xfer_swi;
        u32 dec_packet_in;
        u32 dec_packet_in_fifo_full;
        u32 dec_packet_out;
-       u32 cs_rx_packet_in;
-       u32 cs_rx_packet_out;
        u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+       u16 padding;
+} __packed;
+
+#define DIVERSITY_STATS_NUM_OF_ANT     2
+
+struct wl18xx_acx_diversity_stats {
+       u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT];
+       u32 total_num_of_toggles;
+} __packed;
+
+struct wl18xx_acx_thermal_stats {
+       u16 irq_thr_low;
+       u16 irq_thr_high;
+       u16 tx_stop;
+       u16 tx_resume;
+       u16 false_irq;
+       u16 adc_source_unexpected;
+} __packed;
+
+#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18
+struct wl18xx_acx_calib_failure_stats {
+       u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS];
+       u32 calib_count;
+} __packed;
+
+struct wl18xx_roaming_stats {
+       s32 rssi_level;
 } __packed;
 
-struct wl18xx_acx_mem_stats {
-       u32 rx_free_mem_blks;
-       u32 tx_free_mem_blks;
-       u32 fwlog_free_mem_blks;
-       u32 fw_gen_free_mem_blks;
+struct wl18xx_dfs_stats {
+       u32 num_of_radar_detections;
 } __packed;
 
 struct wl18xx_acx_statistics {
        struct acx_header header;
 
        struct wl18xx_acx_error_stats           error;
-       struct wl18xx_acx_debug_stats           debug;
        struct wl18xx_acx_tx_stats              tx;
        struct wl18xx_acx_rx_stats              rx;
        struct wl18xx_acx_isr_stats             isr;
        struct wl18xx_acx_pwr_stats             pwr;
-       struct wl18xx_acx_ps_poll_stats         ps_poll;
        struct wl18xx_acx_rx_filter_stats       rx_filter;
        struct wl18xx_acx_rx_rate_stats         rx_rate;
        struct wl18xx_acx_aggr_stats            aggr_size;
        struct wl18xx_acx_pipeline_stats        pipeline;
-       struct wl18xx_acx_mem_stats             mem;
+       struct wl18xx_acx_diversity_stats       diversity;
+       struct wl18xx_acx_thermal_stats         thermal;
+       struct wl18xx_acx_calib_failure_stats   calib;
+       struct wl18xx_roaming_stats             roaming;
+       struct wl18xx_dfs_stats                 dfs;
 } __packed;
 
 struct wl18xx_acx_clear_statistics {
index 8c6a1c86f526981ba3f947f0948fd3acb46d9e6f..4edfe28395f03be65d2deabbd15d03ccc13a1af0 100644 (file)
        DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
 
 
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
-
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
@@ -57,7 +62,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
@@ -66,13 +71,15 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
+                                 NUM_OF_RATES_INDEXES);
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
@@ -97,11 +104,8 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
@@ -111,6 +115,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
 
@@ -120,14 +125,13 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
                                  PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
 WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
-
-
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
@@ -141,14 +145,14 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
 
-WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
-                                 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
+                                 AGGR_STATS_TX_AGG);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
+                                 AGGR_STATS_TX_AGG);
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
                                  AGGR_STATS_RX_SIZE_LEN);
 
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
@@ -156,21 +160,32 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
                                  PIPE_STATS_HW_FIFO);
 
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
+                                 DIVERSITY_STATS_NUM_OF_ANT);
+WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
+                                 WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
 
 static ssize_t conf_read(struct file *file, char __user *user_buf,
                         size_t count, loff_t *ppos)
@@ -350,18 +365,23 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
 
        DEBUGFS_ADD(clear_fw_stats, stats);
 
-       DEBUGFS_FWSTATS_ADD(debug, debug1);
-       DEBUGFS_FWSTATS_ADD(debug, debug2);
-       DEBUGFS_FWSTATS_ADD(debug, debug3);
-       DEBUGFS_FWSTATS_ADD(debug, debug4);
-       DEBUGFS_FWSTATS_ADD(debug, debug5);
-       DEBUGFS_FWSTATS_ADD(debug, debug6);
-
-       DEBUGFS_FWSTATS_ADD(error, error_frame);
-       DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
-       DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
-       DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
-       DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+       DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
+       DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
+       DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
+       DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
+       DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
+       DEBUGFS_FWSTATS_ADD(error, bar_retry);
+       DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
+       DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
+       DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
+       DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
+       DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
+       DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
+       DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
+       DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
 
        DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
        DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
@@ -371,7 +391,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
        DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
        DEBUGFS_FWSTATS_ADD(tx, tx_starts);
-       DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+       DEBUGFS_FWSTATS_ADD(tx, tx_stop);
        DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
        DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
        DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
@@ -380,13 +400,14 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(tx, tx_exch);
        DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
        DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+       DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
        DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
        DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
        DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
        DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
        DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
-       DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
-       DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+       DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
+       DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
        DEBUGFS_FWSTATS_ADD(tx, frag_called);
        DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
        DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
@@ -411,11 +432,8 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
        DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
        DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+       DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
        DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
-       DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
-       DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
-       DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
-       DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
        DEBUGFS_FWSTATS_ADD(rx, defrag_called);
        DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
        DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
@@ -425,6 +443,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
        DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
        DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+       DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
 
        DEBUGFS_FWSTATS_ADD(isr, irqs);
 
@@ -433,13 +452,13 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
        DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
        DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
-
-       DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
-       DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
-       DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
-       DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
-       DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
-       DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
+       DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
+       DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
+       DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
+       DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
 
        DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
        DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
@@ -453,12 +472,11 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
 
        DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
 
-       DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+       DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
+       DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
        DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
 
        DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
-       DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
-       DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
        DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
        DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
        DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
@@ -466,19 +484,29 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
        DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
        DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
-       DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
-       DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+       DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
        DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
        DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
        DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
-       DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
-       DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
        DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
 
-       DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
-       DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
-       DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
-       DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+       DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
+       DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
+
+       DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
+       DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
+       DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
+       DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
+       DEBUGFS_FWSTATS_ADD(thermal, false_irq);
+       DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
+
+       DEBUGFS_FWSTATS_ADD(calib, fail_count);
+
+       DEBUGFS_FWSTATS_ADD(calib, calib_count);
+
+       DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
+
+       DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
 
        DEBUGFS_ADD(conf, moddir);
        DEBUGFS_ADD(radar_detection, moddir);
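
DEBUGFS_FWSTATS_ADD above is a wlcore helper that exposes one firmware-statistics field as a read-only debugfs entry. A minimal sketch of the underlying pattern, assuming a plain u32 field and a stats snapshot held in memory; the real wlcore macro also refreshes the statistics from the firmware on read, which this sketch omits:

    #include <linux/debugfs.h>

    /* Illustrative only: the struct layout and macro body are assumptions,
     * not the wlcore definition.
     */
    #define FWSTATS_ADD_SKETCH(stats, sub, name, parent)            \
            debugfs_create_u32(#sub "_" #name, 0400, (parent),      \
                               &(stats)->sub.name)

    /* e.g. FWSTATS_ADD_SKETCH(fw_stats, rx, rx_timeout, rootdir); */
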
index 1a83e190fc15e4158b5e441cc267ad149d465abe..28577a31549d1569032d63457464fe11fdf44d32 100644 (file)
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
        atomic_dec(&queue->inflight_packets);
+
+       /* Wake the dealloc thread _after_ decrementing inflight_packets so
+        * that if kthread_stop() has already been called, the dealloc thread
+        * does not wait forever with nothing to wake it.
+        */
+       wake_up(&queue->dealloc_wq);
 }
 
 int xenvif_schedulable(struct xenvif *vif)
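
The comment added above encodes an ordering requirement, not just a courtesy wakeup: the counter must reach its new value before the wakeup runs, otherwise a stopping dealloc thread could re-check its sleep condition, still see the old count, and never be woken again. A sketch of the waiter side this pairs with; dealloc_work_pending() is a stand-in and the exact condition in the driver differs:

    #include <linux/kthread.h>
    #include <linux/wait.h>

    static int dealloc_thread_sketch(void *data)
    {
            struct xenvif_queue *queue = data;

            while (!kthread_should_stop()) {
                    /* On shutdown this only becomes true once
                     * inflight_packets has dropped to zero, which is why
                     * the completion path decrements before wake_up().
                     */
                    wait_event_interruptible(queue->dealloc_wq,
                            dealloc_work_pending(queue) ||
                            (kthread_should_stop() &&
                             !atomic_read(&queue->inflight_packets)));

                    /* ... release completed zerocopy slots here ... */
            }
            return 0;
    }
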
index 7d50711476fe1e88debca95beb790d770261f036..3f44b522b8311a2c64eba48e6a9b7217ea0cb3a7 100644 (file)
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
-                                                       struct gnttab_map_grant_ref *gop)
+                                                       struct gnttab_map_grant_ref *gop,
+                                                       unsigned int frag_overflow,
+                                                       struct sk_buff *nskb)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        int start;
        pending_ring_idx_t index;
-       unsigned int nr_slots, frag_overflow = 0;
+       unsigned int nr_slots;
 
-       /* At this point shinfo->nr_frags is in fact the number of
-        * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-        */
-       if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-               frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-               BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-               shinfo->nr_frags = MAX_SKB_FRAGS;
-       }
        nr_slots = shinfo->nr_frags;
 
        /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
        }
 
        if (frag_overflow) {
-               struct sk_buff *nskb = xenvif_alloc_skb(0);
-               if (unlikely(nskb == NULL)) {
-                       if (net_ratelimit())
-                               netdev_err(queue->vif->dev,
-                                          "Can't allocate the frag_list skb.\n");
-                       return NULL;
-               }
 
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     unsigned *copy_ops,
                                     unsigned *map_ops)
 {
-       struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-       struct sk_buff *skb;
+       struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+       struct sk_buff *skb, *nskb;
        int ret;
+       unsigned int frag_overflow;
 
        while (skb_queue_len(&queue->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        break;
                }
 
+               skb_shinfo(skb)->nr_frags = ret;
+               if (data_len < txreq.size)
+                       skb_shinfo(skb)->nr_frags++;
+               /* At this point shinfo->nr_frags is in fact the number of
+                * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+                */
+               frag_overflow = 0;
+               nskb = NULL;
+               if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+                       frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+                       BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+                       skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+                       nskb = xenvif_alloc_skb(0);
+                       if (unlikely(nskb == NULL)) {
+                               kfree_skb(skb);
+                               xenvif_tx_err(queue, &txreq, idx);
+                               if (net_ratelimit())
+                                       netdev_err(queue->vif->dev,
+                                                  "Can't allocate the frag_list skb.\n");
+                               break;
+                       }
+               }
+
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
+                               kfree_skb(nskb);
                                break;
                        }
                }
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                (*copy_ops)++;
 
-               skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size) {
-                       skb_shinfo(skb)->nr_frags++;
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             pending_idx);
                        xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                queue->pending_cons++;
 
-               request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-               if (request_gop == NULL) {
-                       kfree_skb(skb);
-                       xenvif_tx_err(queue, &txreq, idx);
-                       break;
-               }
-               gop = request_gop;
+               gop = xenvif_get_requests(queue, skb, txfrags, gop,
+                                         frag_overflow, nskb);
 
                __skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
                smp_wmb();
                queue->dealloc_prod++;
        } while (ubuf);
-       wake_up(&queue->dealloc_wq);
        spin_unlock_irqrestore(&queue->callback_lock, flags);
 
        if (likely(zerocopy_success))
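
Taken together, the netback hunks move the only fallible step out of xenvif_get_requests(): the frag_list carrier skb is now allocated up front in xenvif_tx_build_gops(), where a failure can still be reported cleanly via xenvif_tx_err(), and the ready-made skb is passed down. This also lets the GSO failure path call kfree_skb(nskb) unconditionally, since kfree_skb() is a no-op on NULL. The resulting shape, condensed with stand-in helpers:

    struct sk_buff *nskb = NULL;

    /* Fallible part first, while unwinding is still cheap. */
    if (nr_frags > MAX_SKB_FRAGS) {
            nskb = alloc_carrier_skb();             /* stand-in */
            if (unlikely(!nskb)) {
                    kfree_skb(skb);
                    report_tx_error(queue, &txreq); /* stand-in */
                    return;
            }
    }

    /* Infallible from here on: builds grant ops, consumes nskb if set. */
    gop = build_requests(queue, skb, txfrags, gop, frag_overflow, nskb);
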
index f948c46d51329970c186b2886c267ffba2e807db..002062db2f1dc791b73a1a98c5a4368dfe756179 100644 (file)
@@ -1336,7 +1336,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
        netif_carrier_off(info->netdev);
 
-       for (i = 0; i < num_queues; ++i) {
+       for (i = 0; i < num_queues && info->queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
 
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -2101,7 +2101,8 @@ static int xennet_remove(struct xenbus_device *dev)
 
        unregister_netdev(info->netdev);
 
-       xennet_destroy_queues(info);
+       if (info->queues)
+               xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
 
        return 0;
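
Both netfront hunks guard the same hole: if setup fails before the queues are allocated, info->queues is still NULL when xennet_disconnect_backend() or xennet_remove() runs. Sketched as a defensive teardown:

    static void xennet_teardown_sketch(struct netfront_info *info)
    {
            /* Early probe/connect failures leave info->queues NULL. */
            if (!info->queues)
                    return;

            xennet_destroy_queues(info);
    }
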
index 722673cb785b1a257ff3963b20457e0d4d3cceb9..6639cd1cae368a676fb54b957b6472c3b187c539 100644 (file)
@@ -74,4 +74,5 @@ source "drivers/nfc/nfcmrvl/Kconfig"
 source "drivers/nfc/st21nfca/Kconfig"
 source "drivers/nfc/st-nci/Kconfig"
 source "drivers/nfc/nxp-nci/Kconfig"
+source "drivers/nfc/s3fwrn5/Kconfig"
 endmenu
index 368b6dfe71b311297d4479ce82b94c15be046ce1..2757fe1b8aa52b0047babfb363204c32aaf84e94 100644 (file)
@@ -14,3 +14,4 @@ obj-$(CONFIG_NFC_TRF7970A)    += trf7970a.o
 obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca/
 obj-$(CONFIG_NFC_ST_NCI)       += st-nci/
 obj-$(CONFIG_NFC_NXP_NCI)      += nxp-nci/
+obj-$(CONFIG_NFC_S3FWRN5)      += s3fwrn5/
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
new file mode 100644 (file)
index 0000000..7e3b255
--- /dev/null
@@ -0,0 +1,19 @@
+config NFC_S3FWRN5
+       tristate
+       ---help---
+         Core driver for the Samsung S3FWRN5 NFC chip. It contains the
+         chip's core utilities and is intended to be used by PHY drivers
+         to avoid duplicating common code.
+
+config NFC_S3FWRN5_I2C
+       tristate "Samsung S3FWRN5 I2C support"
+       depends on NFC_NCI && I2C
+       select NFC_S3FWRN5
+       default n
+       ---help---
+         This module adds support for an I2C interface to the S3FWRN5 chip.
+         Select this if your platform is using the I2C bus.
+
+         To compile this driver as a module, choose m here. The module will
+         be called s3fwrn5_i2c.ko.
+         Say N if unsure.
diff --git a/drivers/nfc/s3fwrn5/Makefile b/drivers/nfc/s3fwrn5/Makefile
new file mode 100644 (file)
index 0000000..3381c34
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Makefile for Samsung S3FWRN5 NFC driver
+#
+
+s3fwrn5-objs = core.o firmware.o nci.o
+s3fwrn5_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5.o
+obj-$(CONFIG_NFC_S3FWRN5_I2C) += s3fwrn5_i2c.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
new file mode 100644 (file)
index 0000000..0d866ca
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <net/nfc/nci_core.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+#include "nci.h"
+
+#define S3FWRN5_NFC_PROTOCOLS  (NFC_PROTO_JEWEL_MASK | \
+                               NFC_PROTO_MIFARE_MASK | \
+                               NFC_PROTO_FELICA_MASK | \
+                               NFC_PROTO_ISO14443_MASK | \
+                               NFC_PROTO_ISO14443_B_MASK | \
+                               NFC_PROTO_ISO15693_MASK)
+
+static int s3fwrn5_firmware_update(struct s3fwrn5_info *info)
+{
+       bool need_update;
+       int ret;
+
+       s3fwrn5_fw_init(&info->fw_info, "sec_s3fwrn5_firmware.bin");
+
+       /* Update firmware */
+
+       s3fwrn5_set_wake(info, false);
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_FW);
+
+       ret = s3fwrn5_fw_setup(&info->fw_info);
+       if (ret < 0)
+               return ret;
+
+       need_update = s3fwrn5_fw_check_version(&info->fw_info,
+               info->ndev->manufact_specific_info);
+       if (!need_update)
+               goto out;
+
+       dev_info(&info->ndev->nfc_dev->dev, "Detected new firmware version\n");
+
+       ret = s3fwrn5_fw_download(&info->fw_info);
+       if (ret < 0)
+               goto out;
+
+       /* Update RF configuration */
+
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+
+       s3fwrn5_set_wake(info, true);
+       ret = s3fwrn5_nci_rf_configure(info, "sec_s3fwrn5_rfreg.bin");
+       s3fwrn5_set_wake(info, false);
+
+out:
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+       s3fwrn5_fw_cleanup(&info->fw_info);
+       return ret;
+}
+
+static int s3fwrn5_nci_open(struct nci_dev *ndev)
+{
+       struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+       if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_COLD)
+               return -EBUSY;
+
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+       s3fwrn5_set_wake(info, true);
+
+       return 0;
+}
+
+static int s3fwrn5_nci_close(struct nci_dev *ndev)
+{
+       struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+       s3fwrn5_set_wake(info, false);
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+       return 0;
+}
+
+static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+       struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+       int ret;
+
+       mutex_lock(&info->mutex);
+
+       if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
+               mutex_unlock(&info->mutex);
+               return -EINVAL;
+       }
+
+       ret = s3fwrn5_write(info, skb);
+       if (ret < 0)
+               kfree_skb(skb);
+
+       mutex_unlock(&info->mutex);
+       return ret;
+}
+
+static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+{
+       struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+       int ret;
+
+       ret = s3fwrn5_firmware_update(info);
+       if (ret < 0)
+               goto out;
+
+       /* NCI core reset */
+
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+       s3fwrn5_set_wake(info, true);
+
+       ret = nci_core_reset(info->ndev);
+       if (ret < 0)
+               goto out;
+
+       ret = nci_core_init(info->ndev);
+
+out:
+       return ret;
+}
+
+static struct nci_ops s3fwrn5_nci_ops = {
+       .open = s3fwrn5_nci_open,
+       .close = s3fwrn5_nci_close,
+       .send = s3fwrn5_nci_send,
+       .post_setup = s3fwrn5_nci_post_setup,
+};
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+       struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload)
+{
+       struct s3fwrn5_info *info;
+       int ret;
+
+       info = devm_kzalloc(pdev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->phy_id = phy_id;
+       info->pdev = pdev;
+       info->phy_ops = phy_ops;
+       info->max_payload = max_payload;
+       mutex_init(&info->mutex);
+
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+       s3fwrn5_nci_get_prop_ops(&s3fwrn5_nci_ops.prop_ops,
+               &s3fwrn5_nci_ops.n_prop_ops);
+
+       info->ndev = nci_allocate_device(&s3fwrn5_nci_ops,
+               S3FWRN5_NFC_PROTOCOLS, 0, 0);
+       if (!info->ndev)
+               return -ENOMEM;
+
+       nci_set_parent_dev(info->ndev, pdev);
+       nci_set_drvdata(info->ndev, info);
+
+       ret = nci_register_device(info->ndev);
+       if (ret < 0) {
+               nci_free_device(info->ndev);
+               return ret;
+       }
+
+       info->fw_info.ndev = info->ndev;
+
+       *ndev = info->ndev;
+
+       return ret;
+}
+EXPORT_SYMBOL(s3fwrn5_probe);
+
+void s3fwrn5_remove(struct nci_dev *ndev)
+{
+       struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+       s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+       nci_unregister_device(ndev);
+       nci_free_device(ndev);
+}
+EXPORT_SYMBOL(s3fwrn5_remove);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+       enum s3fwrn5_mode mode)
+{
+       switch (mode) {
+       case S3FWRN5_MODE_NCI:
+               return nci_recv_frame(ndev, skb);
+       case S3FWRN5_MODE_FW:
+               return s3fwrn5_fw_recv_frame(ndev, skb);
+       default:
+               return -ENODEV;
+       }
+}
+EXPORT_SYMBOL(s3fwrn5_recv_frame);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S3FWRN5 NFC driver");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
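
core.c gives PHY transports a small contract: s3fwrn5_probe() wraps the PHY's ops table in a registered NCI device, s3fwrn5_remove() undoes it, and s3fwrn5_recv_frame() routes inbound frames by mode. A hypothetical minimal transport using it; my_phy_ops, my_priv and MY_MAX_PAYLOAD are placeholders, not names from the patch:

    static struct nci_dev *my_ndev;

    static int my_phy_attach(struct device *dev, void *my_priv)
    {
            /* The core owns NCI registration; the PHY only keeps the
             * nci_dev handle for s3fwrn5_recv_frame() and detach.
             */
            return s3fwrn5_probe(&my_ndev, my_priv, dev,
                                 &my_phy_ops, MY_MAX_PAYLOAD);
    }

    static void my_phy_detach(void)
    {
            s3fwrn5_remove(my_ndev);
    }
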
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
new file mode 100644 (file)
index 0000000..64a9025
--- /dev/null
@@ -0,0 +1,511 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+
+struct s3fwrn5_fw_version {
+       __u8 major;
+       __u8 build1;
+       __u8 build2;
+       __u8 target;
+};
+
+static int s3fwrn5_fw_send_msg(struct s3fwrn5_fw_info *fw_info,
+       struct sk_buff *msg, struct sk_buff **rsp)
+{
+       struct s3fwrn5_info *info =
+               container_of(fw_info, struct s3fwrn5_info, fw_info);
+       long ret;
+
+       reinit_completion(&fw_info->completion);
+
+       ret = s3fwrn5_write(info, msg);
+       if (ret < 0)
+               return ret;
+
+       ret = wait_for_completion_interruptible_timeout(
+               &fw_info->completion, msecs_to_jiffies(1000));
+       if (ret < 0)
+               return ret;
+       else if (ret == 0)
+               return -ENXIO;
+
+       if (!fw_info->rsp)
+               return -EINVAL;
+
+       *rsp = fw_info->rsp;
+       fw_info->rsp = NULL;
+
+       return 0;
+}
+
+static int s3fwrn5_fw_prep_msg(struct s3fwrn5_fw_info *fw_info,
+       struct sk_buff **msg, u8 type, u8 code, const void *data, u16 len)
+{
+       struct s3fwrn5_fw_header hdr;
+       struct sk_buff *skb;
+
+       hdr.type = type | fw_info->parity;
+       fw_info->parity ^= 0x80;
+       hdr.code = code;
+       hdr.len = len;
+
+       skb = alloc_skb(S3FWRN5_FW_HDR_SIZE + len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, S3FWRN5_FW_HDR_SIZE), &hdr, S3FWRN5_FW_HDR_SIZE);
+       if (len)
+               memcpy(skb_put(skb, len), data, len);
+
+       *msg = skb;
+
+       return 0;
+}
+
+static int s3fwrn5_fw_get_bootinfo(struct s3fwrn5_fw_info *fw_info,
+       struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+       struct sk_buff *msg, *rsp = NULL;
+       struct s3fwrn5_fw_header *hdr;
+       int ret;
+
+       /* Send GET_BOOTINFO command */
+
+       ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+               S3FWRN5_FW_CMD_GET_BOOTINFO, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+       kfree_skb(msg);
+       if (ret < 0)
+               return ret;
+
+       hdr = (struct s3fwrn5_fw_header *) rsp->data;
+       if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       memcpy(bootinfo, rsp->data + S3FWRN5_FW_HDR_SIZE, 10);
+
+out:
+       kfree_skb(rsp);
+       return ret;
+}
+
+static int s3fwrn5_fw_enter_update_mode(struct s3fwrn5_fw_info *fw_info,
+       const void *hash_data, u16 hash_size,
+       const void *sig_data, u16 sig_size)
+{
+       struct s3fwrn5_fw_cmd_enter_updatemode args;
+       struct sk_buff *msg, *rsp = NULL;
+       struct s3fwrn5_fw_header *hdr;
+       int ret;
+
+       /* Send ENTER_UPDATE_MODE command */
+
+       args.hashcode_size = hash_size;
+       args.signature_size = sig_size;
+
+       ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+               S3FWRN5_FW_CMD_ENTER_UPDATE_MODE, &args, sizeof(args));
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+       kfree_skb(msg);
+       if (ret < 0)
+               return ret;
+
+       hdr = (struct s3fwrn5_fw_header *) rsp->data;
+       if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+               ret = -EPROTO;
+               goto out;
+       }
+
+       kfree_skb(rsp);
+
+       /* Send hashcode data */
+
+       ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+               hash_data, hash_size);
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+       kfree_skb(msg);
+       if (ret < 0)
+               return ret;
+
+       hdr = (struct s3fwrn5_fw_header *) rsp->data;
+       if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+               ret = -EPROTO;
+               goto out;
+       }
+
+       kfree_skb(rsp);
+
+       /* Send signature data */
+
+       ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+               sig_data, sig_size);
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+       kfree_skb(msg);
+       if (ret < 0)
+               return ret;
+
+       hdr = (struct s3fwrn5_fw_header *) rsp->data;
+       if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+               ret = -EPROTO;
+
+out:
+       kfree_skb(rsp);
+       return ret;
+}
+
+static int s3fwrn5_fw_update_sector(struct s3fwrn5_fw_info *fw_info,
+       u32 base_addr, const void *data)
+{
+       struct s3fwrn5_fw_cmd_update_sector args;
+       struct sk_buff *msg, *rsp = NULL;
+       struct s3fwrn5_fw_header *hdr;
+       int ret, i;
+
+       /* Send UPDATE_SECTOR command */
+
+       args.base_address = base_addr;
+
+       ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+               S3FWRN5_FW_CMD_UPDATE_SECTOR, &args, sizeof(args));
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+       kfree_skb(msg);
+       if (ret < 0)
+               return ret;
+
+       hdr = (struct s3fwrn5_fw_header *) rsp->data;
+       if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+               ret = -EPROTO;
+               goto err;
+       }
+
+       kfree_skb(rsp);
+
+       /* Send data split into 256-byte packets */
+
+       for (i = 0; i < 16; ++i) {
+               ret = s3fwrn5_fw_prep_msg(fw_info, &msg,
+                       S3FWRN5_FW_MSG_DATA, 0, data+256*i, 256);
+               if (ret < 0)
+                       break;
+
+               ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+               kfree_skb(msg);
+               if (ret < 0)
+                       break;
+
+               hdr = (struct s3fwrn5_fw_header *) rsp->data;
+               if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+                       ret = -EPROTO;
+                       goto err;
+               }
+
+               kfree_skb(rsp);
+       }
+
+       return ret;
+
+err:
+       kfree_skb(rsp);
+       return ret;
+}
+
+static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info)
+{
+       struct sk_buff *msg, *rsp = NULL;
+       struct s3fwrn5_fw_header *hdr;
+       int ret;
+
+       /* Send COMPLETE_UPDATE_MODE command */
+
+       ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+               S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+       kfree_skb(msg);
+       if (ret < 0)
+               return ret;
+
+       hdr = (struct s3fwrn5_fw_header *) rsp->data;
+       if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+               ret = -EPROTO;
+
+       kfree_skb(rsp);
+
+       return ret;
+}
+
+/*
+ * Firmware header structure:
+ *
+ * 0x00 - 0x0B : Date and time string (w/o NUL termination)
+ * 0x10 - 0x13 : Firmware version
+ * 0x14 - 0x17 : Signature address
+ * 0x18 - 0x1B : Signature size
+ * 0x1C - 0x1F : Firmware image address
+ * 0x20 - 0x23 : Firmware sectors count
+ * 0x24 - 0x27 : Custom signature address
+ * 0x28 - 0x2B : Custom signature size
+ */
+
+#define S3FWRN5_FW_IMAGE_HEADER_SIZE 44
+
+static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+       struct s3fwrn5_fw_image *fw = &fw_info->fw;
+       u32 sig_off;
+       u32 image_off;
+       u32 custom_sig_off;
+       int ret;
+
+       ret = request_firmware(&fw->fw, fw_info->fw_name,
+               &fw_info->ndev->nfc_dev->dev);
+       if (ret < 0)
+               return ret;
+
+       if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
+               return -EINVAL;
+
+       memcpy(fw->date, fw->fw->data + 0x00, 12);
+       fw->date[12] = '\0';
+
+       memcpy(&fw->version, fw->fw->data + 0x10, 4);
+
+       memcpy(&sig_off, fw->fw->data + 0x14, 4);
+       fw->sig = fw->fw->data + sig_off;
+       memcpy(&fw->sig_size, fw->fw->data + 0x18, 4);
+
+       memcpy(&image_off, fw->fw->data + 0x1C, 4);
+       fw->image = fw->fw->data + image_off;
+       memcpy(&fw->image_sectors, fw->fw->data + 0x20, 4);
+
+       memcpy(&custom_sig_off, fw->fw->data + 0x24, 4);
+       fw->custom_sig = fw->fw->data + custom_sig_off;
+       memcpy(&fw->custom_sig_size, fw->fw->data + 0x28, 4);
+
+       return 0;
+}
+
+static void s3fwrn5_fw_release_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+       release_firmware(fw_info->fw.fw);
+}
+
+static int s3fwrn5_fw_get_base_addr(
+       struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo, u32 *base_addr)
+{
+       int i;
+       struct {
+               u8 version[4];
+               u32 base_addr;
+       } match[] = {
+               {{0x05, 0x00, 0x00, 0x00}, 0x00005000},
+               {{0x05, 0x00, 0x00, 0x01}, 0x00003000},
+               {{0x05, 0x00, 0x00, 0x02}, 0x00003000},
+               {{0x05, 0x00, 0x00, 0x03}, 0x00003000},
+               {{0x05, 0x00, 0x00, 0x05}, 0x00003000}
+       };
+
+       for (i = 0; i < ARRAY_SIZE(match); ++i)
+               if (bootinfo->hw_version[0] == match[i].version[0] &&
+                       bootinfo->hw_version[1] == match[i].version[1] &&
+                       bootinfo->hw_version[3] == match[i].version[3]) {
+                       *base_addr = match[i].base_addr;
+                       return 0;
+               }
+
+       return -EINVAL;
+}
+
+static inline bool
+s3fwrn5_fw_is_custom(struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+       return !!bootinfo->hw_version[2];
+}
+
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
+{
+       struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
+       int ret;
+
+       /* Get firmware data */
+
+       ret = s3fwrn5_fw_request_firmware(fw_info);
+       if (ret < 0) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Failed to get fw file, ret=%02x\n", ret);
+               return ret;
+       }
+
+       /* Get bootloader info */
+
+       ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
+       if (ret < 0) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Failed to get bootinfo, ret=%02x\n", ret);
+               goto err;
+       }
+
+       /* Match hardware version to obtain firmware base address */
+
+       ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
+       if (ret < 0) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Unknown hardware version\n");
+               goto err;
+       }
+
+       fw_info->sector_size = bootinfo.sector_size;
+
+       fw_info->sig_size = s3fwrn5_fw_is_custom(&bootinfo) ?
+               fw_info->fw.custom_sig_size : fw_info->fw.sig_size;
+       fw_info->sig = s3fwrn5_fw_is_custom(&bootinfo) ?
+               fw_info->fw.custom_sig : fw_info->fw.sig;
+
+       return 0;
+
+err:
+       s3fwrn5_fw_release_firmware(fw_info);
+       return ret;
+}
+
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version)
+{
+       struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version;
+       struct s3fwrn5_fw_version *old = (void *) &version;
+
+       if (new->major > old->major)
+               return true;
+       if (new->build1 > old->build1)
+               return true;
+       if (new->build2 > old->build2)
+               return true;
+
+       return false;
+}
+
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
+{
+       struct s3fwrn5_fw_image *fw = &fw_info->fw;
+       u8 hash_data[SHA1_DIGEST_SIZE];
+       struct scatterlist sg;
+       struct hash_desc desc;
+       u32 image_size, off;
+       int ret;
+
+       image_size = fw_info->sector_size * fw->image_sectors;
+
+       /* Compute SHA of firmware data */
+
+       sg_init_one(&sg, fw->image, image_size);
+       desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       crypto_hash_init(&desc);
+       crypto_hash_update(&desc, &sg, image_size);
+       crypto_hash_final(&desc, hash_data);
+       crypto_free_hash(desc.tfm);
+
+       /* Firmware update process */
+
+       dev_info(&fw_info->ndev->nfc_dev->dev,
+               "Firmware update: %s\n", fw_info->fw_name);
+
+       ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
+               SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
+       if (ret < 0) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Unable to enter update mode\n");
+               goto out;
+       }
+
+       for (off = 0; off < image_size; off += fw_info->sector_size) {
+               ret = s3fwrn5_fw_update_sector(fw_info,
+                       fw_info->base_addr + off, fw->image + off);
+               if (ret < 0) {
+                       dev_err(&fw_info->ndev->nfc_dev->dev,
+                               "Firmware update error (code=%d)\n", ret);
+                       goto out;
+               }
+       }
+
+       ret = s3fwrn5_fw_complete_update_mode(fw_info);
+       if (ret < 0) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Unable to complete update mode\n");
+               goto out;
+       }
+
+       dev_info(&fw_info->ndev->nfc_dev->dev,
+               "Firmware update: success\n");
+
+out:
+       return ret;
+}
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name)
+{
+       fw_info->parity = 0x00;
+       fw_info->rsp = NULL;
+       fw_info->fw.fw = NULL;
+       strcpy(fw_info->fw_name, fw_name);
+       init_completion(&fw_info->completion);
+}
+
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info)
+{
+       s3fwrn5_fw_release_firmware(fw_info);
+}
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+       struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+       struct s3fwrn5_fw_info *fw_info = &info->fw_info;
+
+       BUG_ON(fw_info->rsp);
+
+       fw_info->rsp = skb;
+
+       complete(&fw_info->completion);
+
+       return 0;
+}
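
The bootloader protocol in this file is a strict command/response lockstep built on a completion: s3fwrn5_fw_send_msg() re-arms the completion, transmits, and sleeps for up to a second until s3fwrn5_fw_recv_frame(), called from the PHY's IRQ thread, stores the response and completes. The generic shape, with struct ctx and xmit() as stand-ins:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    static int send_and_wait_sketch(struct ctx *c, struct sk_buff *msg,
                                    struct sk_buff **rsp)
    {
            long t;

            reinit_completion(&c->done);    /* arm before transmit */

            if (xmit(c, msg) < 0)
                    return -EIO;

            t = wait_for_completion_interruptible_timeout(&c->done,
                            msecs_to_jiffies(1000));
            if (t == 0)
                    return -ENXIO;          /* chip never answered */
            if (t < 0)
                    return t;               /* interrupted by a signal */

            *rsp = c->rsp;                  /* stored by the IRQ thread */
            c->rsp = NULL;
            return 0;
    }
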
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
new file mode 100644 (file)
index 0000000..1ec0647
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_FIRMWARE_H_
+#define __LOCAL_S3FWRN5_FIRMWARE_H_
+
+/* FW Message Types */
+#define S3FWRN5_FW_MSG_CMD                     0x00
+#define S3FWRN5_FW_MSG_RSP                     0x01
+#define S3FWRN5_FW_MSG_DATA                    0x02
+
+/* FW Return Codes */
+#define S3FWRN5_FW_RET_SUCCESS                 0x00
+#define S3FWRN5_FW_RET_MESSAGE_TYPE_INVALID    0x01
+#define S3FWRN5_FW_RET_COMMAND_INVALID         0x02
+#define S3FWRN5_FW_RET_PAGE_DATA_OVERFLOW      0x03
+#define S3FWRN5_FW_RET_SECT_DATA_OVERFLOW      0x04
+#define S3FWRN5_FW_RET_AUTHENTICATION_FAIL     0x05
+#define S3FWRN5_FW_RET_FLASH_OPERATION_FAIL    0x06
+#define S3FWRN5_FW_RET_ADDRESS_OUT_OF_RANGE    0x07
+#define S3FWRN5_FW_RET_PARAMETER_INVALID       0x08
+
+/* ---- FW Packet structures ---- */
+#define S3FWRN5_FW_HDR_SIZE 4
+
+struct s3fwrn5_fw_header {
+       __u8 type;
+       __u8 code;
+       __u16 len;
+};
+
+#define S3FWRN5_FW_CMD_RESET                   0x00
+
+#define S3FWRN5_FW_CMD_GET_BOOTINFO            0x01
+
+struct s3fwrn5_fw_cmd_get_bootinfo_rsp {
+       __u8 hw_version[4];
+       __u16 sector_size;
+       __u16 page_size;
+       __u16 frame_max_size;
+       __u16 hw_buffer_size;
+};
+
+#define S3FWRN5_FW_CMD_ENTER_UPDATE_MODE       0x02
+
+struct s3fwrn5_fw_cmd_enter_updatemode {
+       __u16 hashcode_size;
+       __u16 signature_size;
+};
+
+#define S3FWRN5_FW_CMD_UPDATE_SECTOR           0x04
+
+struct s3fwrn5_fw_cmd_update_sector {
+       __u32 base_address;
+};
+
+#define S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE    0x05
+
+struct s3fwrn5_fw_image {
+       const struct firmware *fw;
+
+       char date[13];
+       u32 version;
+       const void *sig;
+       u32 sig_size;
+       const void *image;
+       u32 image_sectors;
+       const void *custom_sig;
+       u32 custom_sig_size;
+};
+
+struct s3fwrn5_fw_info {
+       struct nci_dev *ndev;
+       struct s3fwrn5_fw_image fw;
+       char fw_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+       const void *sig;
+       u32 sig_size;
+       u32 sector_size;
+       u32 base_addr;
+
+       struct completion completion;
+       struct sk_buff *rsp;
+       char parity;
+};
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name);
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info);
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version);
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info);
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info);
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+
+#endif /* __LOCAL_S3FWRN5_FIRMWARE_H_ */
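
One detail of the header above worth spelling out: the type byte carries an alternating parity bit. s3fwrn5_fw_prep_msg() ORs the current parity into hdr.type and flips it afterwards, presumably so the bootloader can spot a duplicated or dropped frame. Sketched:

    struct s3fwrn5_fw_header hdr = {
            .type = S3FWRN5_FW_MSG_CMD | fw_info->parity,   /* parity in bit 7 */
            .code = S3FWRN5_FW_CMD_GET_BOOTINFO,
            .len  = 0,
    };

    fw_info->parity ^= 0x80;    /* toggles 0x00 <-> 0x80 on every frame */
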
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
new file mode 100644 (file)
index 0000000..b4dd7dd
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * I2C Link Layer for Samsung S3FWRN5 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+
+#include <net/nfc/nfc.h>
+
+#include "s3fwrn5.h"
+
+#define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
+
+#define S3FWRN5_I2C_MAX_PAYLOAD 32
+#define S3FWRN5_EN_WAIT_TIME 150
+
+struct s3fwrn5_i2c_phy {
+       struct i2c_client *i2c_dev;
+       struct nci_dev *ndev;
+
+       unsigned int gpio_en;
+       unsigned int gpio_fw_wake;
+
+       struct mutex mutex;
+
+       enum s3fwrn5_mode mode;
+       unsigned int irq_skip:1;
+};
+
+static void s3fwrn5_i2c_set_wake(void *phy_id, bool wake)
+{
+       struct s3fwrn5_i2c_phy *phy = phy_id;
+
+       mutex_lock(&phy->mutex);
+       gpio_set_value(phy->gpio_fw_wake, wake);
+       msleep(S3FWRN5_EN_WAIT_TIME/2);
+       mutex_unlock(&phy->mutex);
+}
+
+static void s3fwrn5_i2c_set_mode(void *phy_id, enum s3fwrn5_mode mode)
+{
+       struct s3fwrn5_i2c_phy *phy = phy_id;
+
+       mutex_lock(&phy->mutex);
+
+       if (phy->mode == mode)
+               goto out;
+
+       phy->mode = mode;
+
+       gpio_set_value(phy->gpio_en, 1);
+       gpio_set_value(phy->gpio_fw_wake, 0);
+       if (mode == S3FWRN5_MODE_FW)
+               gpio_set_value(phy->gpio_fw_wake, 1);
+
+       if (mode != S3FWRN5_MODE_COLD) {
+               msleep(S3FWRN5_EN_WAIT_TIME);
+               gpio_set_value(phy->gpio_en, 0);
+               msleep(S3FWRN5_EN_WAIT_TIME/2);
+       }
+
+       phy->irq_skip = true;
+
+out:
+       mutex_unlock(&phy->mutex);
+}
+
+static enum s3fwrn5_mode s3fwrn5_i2c_get_mode(void *phy_id)
+{
+       struct s3fwrn5_i2c_phy *phy = phy_id;
+       enum s3fwrn5_mode mode;
+
+       mutex_lock(&phy->mutex);
+
+       mode = phy->mode;
+
+       mutex_unlock(&phy->mutex);
+
+       return mode;
+}
+
+static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+       struct s3fwrn5_i2c_phy *phy = phy_id;
+       int ret;
+
+       mutex_lock(&phy->mutex);
+
+       phy->irq_skip = false;
+
+       ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+       if (ret == -EREMOTEIO) {
+               /* Retry, chip was in standby */
+               usleep_range(110000, 120000);
+               ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+       }
+
+       mutex_unlock(&phy->mutex);
+
+       if (ret < 0)
+               return ret;
+
+       if (ret != skb->len)
+               return -EREMOTEIO;
+
+       return 0;
+}
+
+static struct s3fwrn5_phy_ops i2c_phy_ops = {
+       .set_wake = s3fwrn5_i2c_set_wake,
+       .set_mode = s3fwrn5_i2c_set_mode,
+       .get_mode = s3fwrn5_i2c_get_mode,
+       .write = s3fwrn5_i2c_write,
+};
+
+static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
+{
+       struct sk_buff *skb;
+       size_t hdr_size;
+       size_t data_len;
+       char hdr[4];
+       int ret;
+
+       hdr_size = (phy->mode == S3FWRN5_MODE_NCI) ?
+               NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE;
+       ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size);
+       if (ret < 0)
+               return ret;
+
+       if (ret < hdr_size)
+               return -EBADMSG;
+
+       data_len = (phy->mode == S3FWRN5_MODE_NCI) ?
+               ((struct nci_ctrl_hdr *)hdr)->plen :
+               ((struct s3fwrn5_fw_header *)hdr)->len;
+
+       skb = alloc_skb(hdr_size + data_len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, hdr_size), hdr, hdr_size);
+
+       if (data_len == 0)
+               goto out;
+
+       ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len);
+       if (ret != data_len) {
+               kfree_skb(skb);
+               return -EBADMSG;
+       }
+
+out:
+       return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
+}
+
+static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+       struct s3fwrn5_i2c_phy *phy = phy_id;
+       int ret = 0;
+
+       if (!phy || !phy->ndev) {
+               WARN_ON_ONCE(1);
+               return IRQ_NONE;
+       }
+
+       mutex_lock(&phy->mutex);
+
+       if (phy->irq_skip)
+               goto out;
+
+       switch (phy->mode) {
+       case S3FWRN5_MODE_NCI:
+       case S3FWRN5_MODE_FW:
+               ret = s3fwrn5_i2c_read(phy);
+               break;
+       case S3FWRN5_MODE_COLD:
+               ret = -EREMOTEIO;
+               break;
+       }
+
+out:
+       mutex_unlock(&phy->mutex);
+
+       return IRQ_HANDLED;
+}
+
+static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
+{
+       struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+       struct device_node *np = client->dev.of_node;
+
+       if (!np)
+               return -ENODEV;
+
+       phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
+       if (!gpio_is_valid(phy->gpio_en))
+               return -ENODEV;
+
+       phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
+       if (!gpio_is_valid(phy->gpio_fw_wake))
+               return -ENODEV;
+
+       return 0;
+}
+
+static int s3fwrn5_i2c_probe(struct i2c_client *client,
+                                 const struct i2c_device_id *id)
+{
+       struct s3fwrn5_i2c_phy *phy;
+       int ret;
+
+       phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL);
+       if (!phy)
+               return -ENOMEM;
+
+       mutex_init(&phy->mutex);
+       phy->mode = S3FWRN5_MODE_COLD;
+       phy->irq_skip = true;
+
+       phy->i2c_dev = client;
+       i2c_set_clientdata(client, phy);
+
+       ret = s3fwrn5_i2c_parse_dt(client);
+       if (ret < 0)
+               return ret;
+
+       ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
+               GPIOF_OUT_INIT_HIGH, "s3fwrn5_en");
+       if (ret < 0)
+               return ret;
+
+       ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw_wake,
+               GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake");
+       if (ret < 0)
+               return ret;
+
+       ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
+               S3FWRN5_I2C_MAX_PAYLOAD);
+       if (ret < 0)
+               return ret;
+
+       ret = request_threaded_irq(phy->i2c_dev->irq, NULL,
+               s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+               S3FWRN5_I2C_DRIVER_NAME, phy);
+       if (ret)
+               s3fwrn5_remove(phy->ndev);
+
+       return ret;
+}
+
+static int s3fwrn5_i2c_remove(struct i2c_client *client)
+{
+       struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+
+       s3fwrn5_remove(phy->ndev);
+
+       return 0;
+}
+
+static struct i2c_device_id s3fwrn5_i2c_id_table[] = {
+       {S3FWRN5_I2C_DRIVER_NAME, 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
+
+static const struct of_device_id of_s3fwrn5_i2c_match[] = {
+       { .compatible = "samsung,s3fwrn5-i2c", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
+
+static struct i2c_driver s3fwrn5_i2c_driver = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = S3FWRN5_I2C_DRIVER_NAME,
+               .of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
+       },
+       .probe = s3fwrn5_i2c_probe,
+       .remove = s3fwrn5_i2c_remove,
+       .id_table = s3fwrn5_i2c_id_table,
+};
+
+module_i2c_driver(s3fwrn5_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for Samsung S3FWRN5");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
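
s3fwrn5_i2c_read() above is a classic two-phase framed read: fetch the fixed-size header first so the payload length is known, then fetch exactly that many payload bytes. The same shape in isolation; recv() and payload_len() stand in for i2c_master_recv() and the mode-dependent header parsing:

    static int read_frame_sketch(struct phy *p, u8 *buf, size_t hdr_size)
    {
            size_t plen;
            int ret;

            ret = recv(p, buf, hdr_size);           /* phase 1: header only */
            if (ret < 0)
                    return ret;
            if ((size_t)ret < hdr_size)
                    return -EBADMSG;

            plen = payload_len(buf);                /* length field in header */
            if (!plen)
                    return 0;

            ret = recv(p, buf + hdr_size, plen);    /* phase 2: payload */
            return ret == (int)plen ? 0 : -EBADMSG;
    }
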
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
new file mode 100644 (file)
index 0000000..ace0071
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+
+#include "s3fwrn5.h"
+#include "nci.h"
+
+static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
+{
+       __u8 status = skb->data[0];
+
+       nci_req_complete(ndev, status);
+       return 0;
+}
+
+static struct nci_prop_ops s3fwrn5_nci_prop_ops[] = {
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_AGAIN),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_GET_RFREG),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_SET_RFREG),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_GET_RFREG_VER),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_SET_RFREG_VER),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_START_RFREG),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_STOP_RFREG),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_FW_CFG),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+       {
+               .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+                               NCI_PROP_WR_RESET),
+               .rsp = s3fwrn5_nci_prop_rsp,
+       },
+};
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n)
+{
+       *ops = s3fwrn5_nci_prop_ops;
+       *n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
+}
+
+#define S3FWRN5_RFREG_SECTION_SIZE 252
+
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
+{
+       const struct firmware *fw;
+       struct nci_prop_fw_cfg_cmd fw_cfg;
+       struct nci_prop_set_rfreg_cmd set_rfreg;
+       struct nci_prop_stop_rfreg_cmd stop_rfreg;
+       u32 checksum;
+       int i, len;
+       int ret;
+
+       ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+       if (ret < 0)
+               return ret;
+
+       /* Compute rfreg checksum */
+
+       checksum = 0;
+       for (i = 0; i < fw->size; i += 4)
+               checksum += *((u32 *)(fw->data+i));
+
+       /* Set default clock configuration for external crystal */
+
+       fw_cfg.clk_type = 0x01;
+       fw_cfg.clk_speed = 0xff;
+       fw_cfg.clk_req = 0xff;
+       ret = nci_prop_cmd(info->ndev, NCI_PROP_FW_CFG,
+               sizeof(fw_cfg), (__u8 *)&fw_cfg);
+       if (ret < 0)
+               goto out;
+
+       /* Start rfreg configuration */
+
+       dev_info(&info->ndev->nfc_dev->dev,
+               "rfreg configuration update: %s\n", fw_name);
+
+       ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
+       if (ret < 0) {
+               dev_err(&info->ndev->nfc_dev->dev,
+                       "Unable to start rfreg update\n");
+               goto out;
+       }
+
+       /* Update rfreg */
+
+       set_rfreg.index = 0;
+       for (i = 0; i < fw->size; i += S3FWRN5_RFREG_SECTION_SIZE) {
+               len = (fw->size - i < S3FWRN5_RFREG_SECTION_SIZE) ?
+                       (fw->size - i) : S3FWRN5_RFREG_SECTION_SIZE;
+               memcpy(set_rfreg.data, fw->data+i, len);
+               ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
+                       len+1, (__u8 *)&set_rfreg);
+               if (ret < 0) {
+                       dev_err(&info->ndev->nfc_dev->dev,
+                               "rfreg update error (code=%d)\n", ret);
+                       goto out;
+               }
+               set_rfreg.index++;
+       }
+
+       /* Finish rfreg configuration */
+
+       stop_rfreg.checksum = checksum & 0xffff;
+       ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
+               sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
+       if (ret < 0) {
+               dev_err(&info->ndev->nfc_dev->dev,
+                       "Unable to stop rfreg update\n");
+               goto out;
+       }
+
+       dev_info(&info->ndev->nfc_dev->dev,
+               "rfreg configuration update: success\n");
+out:
+       release_firmware(fw);
+       return ret;
+}
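
The rfreg update above is a plain chunked upload: checksum the whole table word by word, stream it in 252-byte sections behind a running index, then seal the transfer with the low 16 bits of the sum. Condensed, with send_section() and send_stop() standing in for the NCI_PROP_SET_RFREG and NCI_PROP_STOP_RFREG commands:

    #include <linux/kernel.h>   /* min_t */

    static int upload_rfreg_sketch(const u8 *data, size_t size)
    {
            u32 sum = 0;
            size_t off;
            u8 index = 0;

            for (off = 0; off + 4 <= size; off += 4)
                    sum += *(const u32 *)(data + off);      /* word checksum */

            for (off = 0; off < size; off += 252) {
                    size_t len = min_t(size_t, 252, size - off);

                    if (send_section(index++, data + off, len) < 0)
                            return -EIO;
            }

            return send_stop(sum & 0xffff);         /* 16-bit seal */
    }
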
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
new file mode 100644 (file)
index 0000000..0e68d43
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_NCI_H_
+#define __LOCAL_S3FWRN5_NCI_H_
+
+#include "s3fwrn5.h"
+
+#define NCI_PROP_AGAIN         0x01
+
+#define NCI_PROP_GET_RFREG     0x21
+#define NCI_PROP_SET_RFREG     0x22
+
+struct nci_prop_set_rfreg_cmd {
+       __u8 index;
+       __u8 data[252];
+};
+
+struct nci_prop_set_rfreg_rsp {
+       __u8 status;
+};
+
+#define NCI_PROP_GET_RFREG_VER 0x24
+
+struct nci_prop_get_rfreg_ver_rsp {
+       __u8 status;
+       __u8 data[8];
+};
+
+#define NCI_PROP_SET_RFREG_VER 0x25
+
+struct nci_prop_set_rfreg_ver_cmd {
+       __u8 data[8];
+};
+
+struct nci_prop_set_rfreg_ver_rsp {
+       __u8 status;
+};
+
+#define NCI_PROP_START_RFREG   0x26
+
+struct nci_prop_start_rfreg_rsp {
+       __u8 status;
+};
+
+#define NCI_PROP_STOP_RFREG    0x27
+
+struct nci_prop_stop_rfreg_cmd {
+       __u16 checksum;
+};
+
+struct nci_prop_stop_rfreg_rsp {
+       __u8 status;
+};
+
+#define NCI_PROP_FW_CFG                0x28
+
+struct nci_prop_fw_cfg_cmd {
+       __u8 clk_type;
+       __u8 clk_speed;
+       __u8 clk_req;
+};
+
+struct nci_prop_fw_cfg_rsp {
+       __u8 status;
+};
+
+#define NCI_PROP_WR_RESET      0x2f
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n);
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
+
+#endif /* __LOCAL_S3FWRN5_NCI_H_ */
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
new file mode 100644 (file)
index 0000000..89210d4
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_H_
+#define __LOCAL_S3FWRN5_H_
+
+#include <linux/nfc.h>
+
+#include <net/nfc/nci_core.h>
+
+#include "firmware.h"
+
+enum s3fwrn5_mode {
+       S3FWRN5_MODE_COLD,
+       S3FWRN5_MODE_NCI,
+       S3FWRN5_MODE_FW,
+};
+
+struct s3fwrn5_phy_ops {
+       void (*set_wake)(void *id, bool sleep);
+       void (*set_mode)(void *id, enum s3fwrn5_mode);
+       enum s3fwrn5_mode (*get_mode)(void *id);
+       int (*write)(void *id, struct sk_buff *skb);
+};
+
+struct s3fwrn5_info {
+       struct nci_dev *ndev;
+       void *phy_id;
+       struct device *pdev;
+
+       struct s3fwrn5_phy_ops *phy_ops;
+       unsigned int max_payload;
+
+       struct s3fwrn5_fw_info fw_info;
+
+       struct mutex mutex;
+};
+
+static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
+       enum s3fwrn5_mode mode)
+{
+       if (!info->phy_ops->set_mode)
+               return -ENOTSUPP;
+
+       info->phy_ops->set_mode(info->phy_id, mode);
+
+       return 0;
+}
+
+static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
+{
+       if (!info->phy_ops->get_mode)
+               return -ENOTSUPP;
+
+       return info->phy_ops->get_mode(info->phy_id);
+}
+
+static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
+{
+       if (!info->phy_ops->set_wake)
+               return -ENOTSUPP;
+
+       info->phy_ops->set_wake(info->phy_id, wake);
+
+       return 0;
+}
+
+static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
+{
+       if (!info->phy_ops->write)
+               return -ENOTSUPP;
+
+       return info->phy_ops->write(info->phy_id, skb);
+}
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+       struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload);
+void s3fwrn5_remove(struct nci_dev *ndev);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+       enum s3fwrn5_mode mode);
+
+#endif /* __LOCAL_S3FWRN5_H_ */
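
Everything in this header funnels through the phy_ops table, and each inline wrapper checks the op pointer before dispatching, so a transport may leave an operation unimplemented and the core degrades to -ENOTSUPP. A hypothetical no-op backend showing the minimum a PHY must provide:

    static enum s3fwrn5_mode dummy_mode = S3FWRN5_MODE_COLD;

    static void dummy_set_mode(void *id, enum s3fwrn5_mode mode)
    {
            dummy_mode = mode;
    }

    static enum s3fwrn5_mode dummy_get_mode(void *id)
    {
            return dummy_mode;
    }

    static int dummy_write(void *id, struct sk_buff *skb)
    {
            return 0;       /* a real PHY pushes skb->data onto the bus */
    }

    static struct s3fwrn5_phy_ops dummy_phy_ops = {
            .set_mode = dummy_set_mode,
            .get_mode = dummy_get_mode,
            .write    = dummy_write,
            /* .set_wake left NULL: s3fwrn5_set_wake() returns -ENOTSUPP */
    };
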
index fc3904c946ee928d41743309f7b13f0b69cca2c2..e7c6db9c58607d670fb3ee7d6e4b8c217817eab7 100644 (file)
@@ -21,3 +21,14 @@ config NFC_ST_NCI_I2C
 
          If you choose to build a module, it'll be called st-nci_i2c.
          Say N if unsure.
+
+config NFC_ST_NCI_SPI
+       tristate "NFC ST NCI spi support"
+       depends on NFC_ST_NCI && SPI
+       ---help---
+         This module adds support for an SPI interface to the
+         STMicroelectronics NFC NCI chip family.
+         Select this if your platform is using the SPI bus.
+
+         If you choose to build a module, it'll be called st-nci_spi.
+         Say N if unsure.
index 0df157df3a942312a8e6b5ed2e11f67923672897..348ce76f2177546a138d8e0fd07358ac18d8ed80 100644 (file)
@@ -7,3 +7,6 @@ obj-$(CONFIG_NFC_ST_NCI)     += st-nci.o
 
 st-nci_i2c-objs = i2c.o
 obj-$(CONFIG_NFC_ST_NCI_I2C) += st-nci_i2c.o
+
+st-nci_spi-objs = spi.o
+obj-$(CONFIG_NFC_ST_NCI_SPI) += st-nci_spi.o
index 06175ce769bbf349d3539eaabacb2edefcf3afb4..707ed2eb593638208df4420c7dc9a93abd4399ad 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/nfc.h>
-#include <linux/platform_data/st_nci.h>
+#include <linux/platform_data/st-nci.h>
 
 #include "ndlc.h"
 
-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
 
 /* ndlc header */
-#define ST21NFCB_FRAME_HEADROOM        1
-#define ST21NFCB_FRAME_TAILROOM 0
+#define ST_NCI_FRAME_HEADROOM  1
+#define ST_NCI_FRAME_TAILROOM 0
 
 #define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
 #define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
@@ -118,15 +118,10 @@ static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
 /*
  * Reads an ndlc frame and returns it in a newly allocated sk_buff.
  * returns:
- * frame size : if received frame is complete (find ST21NFCB_SOF_EOF at
- * end of read)
- * -EAGAIN : if received frame is incomplete (not find ST21NFCB_SOF_EOF
- * at end of read)
+ * 0 : if received frame is complete
  * -EREMOTEIO : i2c read error (fatal)
  * -EBADMSG : frame was incorrect and discarded
- * (value returned from st_nci_i2c_repack)
- * -EIO : if no ST21NFCB_SOF_EOF is found after reaching
- * the read length end sequence
+ * -ENOMEM : cannot allocate skb, frame dropped
  */
 static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
                                 struct sk_buff **skb)
@@ -179,7 +174,7 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
 /*
  * Reads an ndlc frame from the chip.
  *
- * On ST21NFCB, IRQ goes in idle state when read starts.
+ * On ST_NCI, the IRQ goes into the idle state when a read starts.
  */
 static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
 {
@@ -325,12 +320,12 @@ static int st_nci_i2c_probe(struct i2c_client *client,
                }
        } else {
                nfc_err(&client->dev,
-                       "st21nfcb platform resources not available\n");
+                       "st_nci platform resources not available\n");
                return -ENODEV;
        }
 
        r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
-                       ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+                       ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
                        &phy->ndlc);
        if (r < 0) {
                nfc_err(&client->dev, "Unable to register ndlc layer\n");
index 56c6a4cb4c9619ff7a35ccfbd568b998474dc636..d2cf84e680c655d19d6e09f44ee73024b1630304 100644 (file)
@@ -171,6 +171,8 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
                if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
                        switch (pcb & PCB_SYNC_MASK) {
                        case PCB_SYNC_ACK:
+                               skb = skb_dequeue(&ndlc->ack_pending_q);
+                               kfree_skb(skb);
                                del_timer_sync(&ndlc->t1_timer);
                                del_timer_sync(&ndlc->t2_timer);
                                ndlc->t2_active = false;
@@ -192,12 +194,13 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc)
                                          msecs_to_jiffies(NDLC_TIMER_T1_WAIT));
                                break;
                        default:
-                               pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
                                kfree_skb(skb);
                                break;
                        }
-               } else {
+               } else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) {
                        nci_recv_frame(ndlc->ndev, skb);
+               } else {
+                       kfree_skb(skb);
                }
        }
 }
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
new file mode 100644 (file)
index 0000000..598a58c
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ * SPI Link Layer for ST NCI based Driver
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/st-nci.h>
+
+#include "ndlc.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+
+/* ndlc header */
+#define ST_NCI_FRAME_HEADROOM  1
+#define ST_NCI_FRAME_TAILROOM 0
+
+#define ST_NCI_SPI_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
+#define ST_NCI_SPI_MAX_SIZE 250 /* req 4.2.1 */
+
+#define ST_NCI_SPI_DRIVER_NAME "st_nci_spi"
+
+static struct spi_device_id st_nci_spi_id_table[] = {
+       {ST_NCI_SPI_DRIVER_NAME, 0},
+       {}
+};
+MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
+
+struct st_nci_spi_phy {
+       struct spi_device *spi_dev;
+       struct llt_ndlc *ndlc;
+
+       unsigned int gpio_reset;
+       unsigned int irq_polarity;
+};
+
+#define SPI_DUMP_SKB(info, skb)                                        \
+do {                                                           \
+       pr_debug("%s:\n", info);                                \
+       print_hex_dump(KERN_DEBUG, "spi: ", DUMP_PREFIX_OFFSET, \
+                      16, 1, (skb)->data, (skb)->len, 0);      \
+} while (0)
+
+static int st_nci_spi_enable(void *phy_id)
+{
+       struct st_nci_spi_phy *phy = phy_id;
+
+       gpio_set_value(phy->gpio_reset, 0);
+       usleep_range(10000, 15000);
+       gpio_set_value(phy->gpio_reset, 1);
+       usleep_range(80000, 85000);
+
+       if (phy->ndlc->powered == 0)
+               enable_irq(phy->spi_dev->irq);
+
+       return 0;
+}
+
+static void st_nci_spi_disable(void *phy_id)
+{
+       struct st_nci_spi_phy *phy = phy_id;
+
+       disable_irq_nosync(phy->spi_dev->irq);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb.
+ */
+static int st_nci_spi_write(void *phy_id, struct sk_buff *skb)
+{
+       int r;
+       struct st_nci_spi_phy *phy = phy_id;
+       struct spi_device *dev = phy->spi_dev;
+       struct sk_buff *skb_rx;
+       u8 buf[ST_NCI_SPI_MAX_SIZE];
+       struct spi_transfer spi_xfer = {
+               .tx_buf = skb->data,
+               .rx_buf = buf,
+               .len = skb->len,
+       };
+
+       SPI_DUMP_SKB("st_nci_spi_write", skb);
+
+       if (phy->ndlc->hard_fault != 0)
+               return phy->ndlc->hard_fault;
+
+       r = spi_sync_transfer(dev, &spi_xfer, 1);
+       /*
+        * We may have received some valuable data on miso line.
+        * Send them back in the ndlc state machine.
+        */
+       if (!r) {
+               skb_rx = alloc_skb(skb->len, GFP_KERNEL);
+               if (!skb_rx) {
+                       r = -ENOMEM;
+                       goto exit;
+               }
+
+               skb_put(skb_rx, skb->len);
+               memcpy(skb_rx->data, buf, skb->len);
+               ndlc_recv(phy->ndlc, skb_rx);
+       }
+
+exit:
+       return r;
+}
+
+/*
+ * Reads an ndlc frame and returns it in a newly allocated sk_buff.
+ * returns:
+ * 0 : if received frame is complete
+ * -EREMOTEIO : spi read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int st_nci_spi_read(struct st_nci_spi_phy *phy,
+                       struct sk_buff **skb)
+{
+       int r;
+       u8 len;
+       u8 buf[ST_NCI_SPI_MAX_SIZE];
+       struct spi_device *dev = phy->spi_dev;
+       struct spi_transfer spi_xfer = {
+               .rx_buf = buf,
+               .len = ST_NCI_SPI_MIN_SIZE,
+       };
+
+       r = spi_sync_transfer(dev, &spi_xfer, 1);
+       if (r < 0)
+               return -EREMOTEIO;
+
+       len = be16_to_cpu(*(__be16 *) (buf + 2));
+       if (len > ST_NCI_SPI_MAX_SIZE) {
+               nfc_err(&dev->dev, "invalid frame len\n");
+               phy->ndlc->hard_fault = 1;
+               return -EBADMSG;
+       }
+
+       *skb = alloc_skb(ST_NCI_SPI_MIN_SIZE + len, GFP_KERNEL);
+       if (*skb == NULL)
+               return -ENOMEM;
+
+       skb_reserve(*skb, ST_NCI_SPI_MIN_SIZE);
+       skb_put(*skb, ST_NCI_SPI_MIN_SIZE);
+       memcpy((*skb)->data, buf, ST_NCI_SPI_MIN_SIZE);
+
+       if (!len)
+               return 0;
+
+       spi_xfer.len = len;
+       r = spi_sync_transfer(dev, &spi_xfer, 1);
+       if (r < 0) {
+               kfree_skb(*skb);
+               return -EREMOTEIO;
+       }
+
+       skb_put(*skb, len);
+       memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len);
+
+       SPI_DUMP_SKB("spi frame read", *skb);
+
+       return 0;
+}
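
The frame length above is parsed big-endian from bytes 2-3 of the header (the PCB byte plus the start of the NCI packet header). Two details worth noting: the cast assumes buf is suitably aligned, which holds for this stack array, and len is a u8, so only the low byte of the 16-bit value (buf[3], the NCI payload-length byte) survives the assignment. A hedged, alignment-safe variant of the same parse, assuming that header layout:

#include <asm/unaligned.h>

static u16 demo_ndlc_len_be16(const u8 *hdr)
{
        /* alignment-safe equivalent of be16_to_cpu(*(__be16 *)(hdr + 2)) */
        return get_unaligned_be16(hdr + 2);
}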
+
+/*
+ * Reads an ndlc frame from the chip.
+ *
+ * On ST_NCI, the IRQ line goes idle when the read starts.
+ */
+static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
+{
+       struct st_nci_spi_phy *phy = phy_id;
+       struct spi_device *dev;
+       struct sk_buff *skb = NULL;
+       int r;
+
+       if (!phy || !phy->ndlc || irq != phy->spi_dev->irq) {
+               WARN_ON_ONCE(1);
+               return IRQ_NONE;
+       }
+
+       dev = phy->spi_dev;
+       dev_dbg(&dev->dev, "IRQ\n");
+
+       if (phy->ndlc->hard_fault)
+               return IRQ_HANDLED;
+
+       if (!phy->ndlc->powered) {
+               st_nci_spi_disable(phy);
+               return IRQ_HANDLED;
+       }
+
+       r = st_nci_spi_read(phy, &skb);
+       if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
+               return IRQ_HANDLED;
+
+       ndlc_recv(phy->ndlc, skb);
+
+       return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops spi_phy_ops = {
+       .write = st_nci_spi_write,
+       .enable = st_nci_spi_enable,
+       .disable = st_nci_spi_disable,
+};
+
+#ifdef CONFIG_OF
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+       struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+       struct device_node *pp;
+       int gpio;
+       int r;
+
+       pp = dev->dev.of_node;
+       if (!pp)
+               return -ENODEV;
+
+       /* Get GPIO from device tree */
+       gpio = of_get_named_gpio(pp, "reset-gpios", 0);
+       if (gpio < 0) {
+               nfc_err(&dev->dev,
+                       "Failed to retrieve reset-gpios from device tree\n");
+               return gpio;
+       }
+
+       /* GPIO request and configuration */
+       r = devm_gpio_request_one(&dev->dev, gpio,
+                               GPIOF_OUT_INIT_HIGH, "clf_reset");
+       if (r) {
+               nfc_err(&dev->dev, "Failed to request reset pin\n");
+               return r;
+       }
+       phy->gpio_reset = gpio;
+
+       phy->irq_polarity = irq_get_trigger_type(dev->irq);
+
+       return 0;
+}
+#else
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+       return -ENODEV;
+}
+#endif
+
+static int st_nci_spi_request_resources(struct spi_device *dev)
+{
+       struct st_nci_nfc_platform_data *pdata;
+       struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+       int r;
+
+       pdata = dev->dev.platform_data;
+       if (pdata == NULL) {
+               nfc_err(&dev->dev, "No platform data\n");
+               return -EINVAL;
+       }
+
+       /* store for later use */
+       phy->gpio_reset = pdata->gpio_reset;
+       phy->irq_polarity = pdata->irq_polarity;
+
+       r = devm_gpio_request_one(&dev->dev,
+                       phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset");
+       if (r) {
+               pr_err("%s : reset gpio_request failed\n", __FILE__);
+               return r;
+       }
+
+       return 0;
+}
+
+static int st_nci_spi_probe(struct spi_device *dev)
+{
+       struct st_nci_spi_phy *phy;
+       struct st_nci_nfc_platform_data *pdata;
+       int r;
+
+       /* Check SPI platform functionalities before touching dev */
+       if (!dev) {
+               pr_debug("%s: dev is NULL. Device is not accessible.\n",
+                       __func__);
+               return -ENODEV;
+       }
+
+       dev_dbg(&dev->dev, "%s\n", __func__);
+       dev_dbg(&dev->dev, "IRQ: %d\n", dev->irq);
+
+       phy = devm_kzalloc(&dev->dev, sizeof(struct st_nci_spi_phy),
+                          GFP_KERNEL);
+       if (!phy)
+               return -ENOMEM;
+
+       phy->spi_dev = dev;
+
+       spi_set_drvdata(dev, phy);
+
+       pdata = dev->dev.platform_data;
+       if (!pdata && dev->dev.of_node) {
+               r = st_nci_spi_of_request_resources(dev);
+               if (r) {
+                       nfc_err(&dev->dev, "No platform data\n");
+                       return r;
+               }
+       } else if (pdata) {
+               r = st_nci_spi_request_resources(dev);
+               if (r) {
+                       nfc_err(&dev->dev,
+                               "Cannot get platform resources\n");
+                       return r;
+               }
+       } else {
+               nfc_err(&dev->dev,
+                       "st_nci platform resources not available\n");
+               return -ENODEV;
+       }
+
+       r = ndlc_probe(phy, &spi_phy_ops, &dev->dev,
+                       ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
+                       &phy->ndlc);
+       if (r < 0) {
+               nfc_err(&dev->dev, "Unable to register ndlc layer\n");
+               return r;
+       }
+
+       r = devm_request_threaded_irq(&dev->dev, dev->irq, NULL,
+                               st_nci_irq_thread_fn,
+                               phy->irq_polarity | IRQF_ONESHOT,
+                               ST_NCI_SPI_DRIVER_NAME, phy);
+       if (r < 0)
+               nfc_err(&dev->dev, "Unable to register IRQ handler\n");
+
+       return r;
+}
+
+static int st_nci_spi_remove(struct spi_device *dev)
+{
+       struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+
+       dev_dbg(&dev->dev, "%s\n", __func__);
+
+       ndlc_remove(phy->ndlc);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st_nci_spi_match[] = {
+       { .compatible = "st,st21nfcb-spi", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, of_st_nci_spi_match);
+#endif
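
The driver struct below can reference the match table unconditionally because of_match_ptr() evaluates to the pointer when CONFIG_OF is set and to NULL otherwise, mirroring the #ifdef around the table. A sketch of that behavior (demo_ prefix added to avoid clashing with the real macro):

#ifdef CONFIG_OF
#define demo_of_match_ptr(ptr) (ptr)
#else
#define demo_of_match_ptr(ptr) NULL
#endif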
+
+static struct spi_driver st_nci_spi_driver = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = ST_NCI_SPI_DRIVER_NAME,
+               .of_match_table = of_match_ptr(of_st_nci_spi_match),
+       },
+       .probe = st_nci_spi_probe,
+       .id_table = st_nci_spi_id_table,
+       .remove = st_nci_spi_remove,
+};
+
+module_spi_driver(st_nci_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
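
module_spi_driver() hides the usual init/exit boilerplate. Its expansion is approximately the following, registering the driver on module load and unregistering it on unload:

static int __init st_nci_spi_driver_init(void)
{
        return spi_register_driver(&st_nci_spi_driver);
}
module_init(st_nci_spi_driver_init);

static void __exit st_nci_spi_driver_exit(void)
{
        spi_unregister_driver(&st_nci_spi_driver);
}
module_exit(st_nci_spi_driver_exit);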
index 97addfa96c6ff38c088a66e6d303113bc7ca086a..c742ef65a05a50c72f4142e0fed570980e01aa39 100644 (file)
@@ -189,14 +189,14 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
                                ST_NCI_DEVICE_MGNT_GATE,
                                ST_NCI_DEVICE_MGNT_PIPE);
        if (r < 0)
-               goto free_info;
+               return r;
 
        /* Get pipe list */
        r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
                        ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
                        &skb_pipe_list);
        if (r < 0)
-               goto free_info;
+               return r;
 
        /* Complete the existing gate_pipe table */
        for (i = 0; i < skb_pipe_list->len; i++) {
@@ -222,6 +222,7 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
                    dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
                        pr_err("Unexpected apdu_reader pipe on host %x\n",
                               dm_pipe_info->src_host_id);
+                       kfree_skb(skb_pipe_info);
                        continue;
                }
 
@@ -241,13 +242,12 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
                        ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
                                                dm_pipe_info->src_host_id;
                }
+               kfree_skb(skb_pipe_info);
        }
 
        memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
               sizeof(st_nci_gates));
 
-free_info:
-       kfree_skb(skb_pipe_info);
        kfree_skb(skb_pipe_list);
        return r;
 }
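
The change above fixes a leak: each command in the loop allocates a fresh skb_pipe_info reply, so the single kfree_skb() at the old free_info label released only the last one. The corrected shape, sketched with hypothetical demo_* helpers standing in for the NCI calls:

for (i = 0; i < n_pipes; i++) {
        struct sk_buff *skb_info;

        if (demo_get_pipe_info(ndev, i, &skb_info) < 0)
                continue;
        demo_fill_gate_pipe(ndev, skb_info);
        kfree_skb(skb_info);    /* free each iteration's reply */
}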
index d251f7229c4e8fe5aa9da16baf09e75ed0a5b412..051286562fab3d72fb80bf822704cac7d3817c01 100644 (file)
@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
                                ST21NFCA_DEVICE_MGNT_GATE,
                                ST21NFCA_DEVICE_MGNT_PIPE);
        if (r < 0)
-               goto free_info;
+               return r;
 
        /* Get pipe list */
        r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
                        ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
                        &skb_pipe_list);
        if (r < 0)
-               goto free_info;
+               return r;
 
        /* Complete the existing gate_pipe table */
        for (i = 0; i < skb_pipe_list->len; i++) {
@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
                        info->src_host_id != ST21NFCA_ESE_HOST_ID) {
                        pr_err("Unexpected apdu_reader pipe on host %x\n",
                                info->src_host_id);
+                       kfree_skb(skb_pipe_info);
                        continue;
                }
 
@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
                        hdev->pipes[st21nfca_gates[j].pipe].dest_host =
                                                        info->src_host_id;
                }
+               kfree_skb(skb_pipe_info);
        }
 
        /*
@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
                                        st21nfca_gates[i].gate,
                                        st21nfca_gates[i].pipe);
                        if (r < 0)
-                               goto free_info;
+                               goto free_list;
                }
        }
 
        memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
-free_info:
-       kfree_skb(skb_pipe_info);
+free_list:
        kfree_skb(skb_pipe_list);
        return r;
 }
index 85b4d86772d8b37e05f83b07377b2af6cd2d0d75..70b0707fd9a98514b5785dbe3a6fc467f8206d07 100644 (file)
 
 #define TRF7970A_NFC_TARGET_LEVEL_RFDET(v)     ((v) & 0x07)
 #define TRF7970A_NFC_TARGET_LEVEL_HI_RF                BIT(3)
-#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN       BIT(3)
+#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN       BIT(5)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES  (0x0 << 6)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES  (0x1 << 6)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES (0x2 << 6)
@@ -629,7 +629,9 @@ static void trf7970a_send_upstream(struct trf7970a *trf)
        }
 
        if (trf->adjust_resp_len) {
-               skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+               if (trf->rx_skb)
+                       skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+
                trf->adjust_resp_len = false;
        }
 
index 23435f2a5486f806ee8f686fdb898d0815a68013..2e2530743831a19f2ca4db3b313d1f8b2db44ebb 100644 (file)
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
        ntb->dev.bus = &ntb_bus;
        ntb->dev.parent = &ntb->pdev->dev;
        ntb->dev.release = ntb_dev_release;
-       dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+       dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
 
        ntb->ctx = NULL;
        ntb->ctx_ops = NULL;
index efe3ad4122f2ee1094da78c1bb31d86642b5b3a6..1c6386d5f79c742737e4ee1a8a2b99df686ffaa0 100644 (file)
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 
        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
+       struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
-       spinlock_t ntb_rx_pend_q_lock;
-       spinlock_t ntb_rx_free_q_lock;
+       /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+       spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;
+
+       struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
        char *buf;
        ssize_t ret, out_offset, out_count;
 
+       qp = filp->private_data;
+
+       if (!qp || !qp->link_is_up)
+               return 0;
+
        out_count = 1000;
 
        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       qp = filp->private_data;
        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
        return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+                                          struct list_head *list,
+                                          struct list_head *to_list)
+{
+       struct ntb_queue_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+
+       if (list_empty(list)) {
+               entry = NULL;
+       } else {
+               entry = list_first_entry(list, struct ntb_queue_entry, entry);
+               list_move_tail(&entry->entry, to_list);
+       }
+
+       spin_unlock_irqrestore(lock, flags);
+
+       return entry;
+}
+
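ntb_list_mv() dequeues the head of one list and appends it to another inside a single critical section, so an entry is never invisible to both lists mid-transfer. A usage sketch, mirroring how ntb_process_rxc() consumes it later in this patch (code shown as it would sit inside such a function):

struct ntb_queue_entry *entry;

entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry)
        return -EAGAIN; /* no pre-posted receive buffer */
/* entry stays on rx_post_q until ntb_complete_rxc() retires it in order */
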
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
 {
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-                     unsigned int size)
+                     resource_size_t size)
 {
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
-       unsigned int xlat_size, buff_size;
+       size_t xlat_size, buff_size;
        int rc;
 
+       if (!size)
+               return -EINVAL;
+
        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);
 
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
-               dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+               dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);
+
+               tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-       if (nt_debugfs_dir) {
+       if (nt->debugfs_node_dir) {
                char debugfs_name[4];
 
                snprintf(debugfs_name, 4, "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-                                                    nt_debugfs_dir);
+                                                    nt->debugfs_node_dir);
 
                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-       spin_lock_init(&qp->ntb_rx_pend_q_lock);
-       spin_lock_init(&qp->ntb_rx_free_q_lock);
+       spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+       INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
                goto err2;
        }
 
+       if (nt_debugfs_dir) {
+               nt->debugfs_node_dir =
+                       debugfs_create_dir(pci_name(ndev->pdev),
+                                          nt_debugfs_dir);
+       }
+
        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
        kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-       struct ntb_queue_entry *entry = data;
-       struct ntb_transport_qp *qp = entry->qp;
-       void *cb_data = entry->cb_data;
-       unsigned int len = entry->len;
-       struct ntb_payload_header *hdr = entry->rx_hdr;
+       struct ntb_queue_entry *entry;
+       void *cb_data;
+       unsigned int len;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+       while (!list_empty(&qp->rx_post_q)) {
+               entry = list_first_entry(&qp->rx_post_q,
+                                        struct ntb_queue_entry, entry);
+               if (!(entry->flags & DESC_DONE_FLAG))
+                       break;
+
+               entry->rx_hdr->flags = 0;
+               iowrite32(entry->index, &qp->rx_info->entry);
 
-       hdr->flags = 0;
+               cb_data = entry->cb_data;
+               len = entry->len;
 
-       iowrite32(entry->index, &qp->rx_info->entry);
+               list_move_tail(&entry->entry, &qp->rx_free_q);
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+               spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 
-       if (qp->rx_handler && qp->client_ready)
-               qp->rx_handler(qp, qp->cb_data, cb_data, len);
+               if (qp->rx_handler && qp->client_ready)
+                       qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+               spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+       }
+
+       spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+       struct ntb_queue_entry *entry = data;
+
+       entry->flags |= DESC_DONE_FLAG;
+
+       ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
        ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-                        size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
-       size_t pay_off, buff_off;
+       size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
 
-       entry->len = len;
+       len = entry->len;
 
        if (!chan)
                goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;
-       int rc;
 
        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
                return -EIO;
        }
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
        if (!entry) {
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;
-
-               rc = -ENOMEM;
-               goto err;
+               return -EAGAIN;
        }
 
+       entry->rx_hdr = hdr;
+       entry->index = qp->rx_index;
+
        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
                qp->rx_err_oflow++;
 
-               rc = -EIO;
-               goto err;
-       }
+               entry->len = -EIO;
+               entry->flags |= DESC_DONE_FLAG;
 
-       dev_dbg(&qp->ndev->pdev->dev,
-               "RX OK index %u ver %u size %d into buf size %d\n",
-               qp->rx_index, hdr->ver, hdr->len, entry->len);
+               ntb_complete_rxc(qp);
+       } else {
+               dev_dbg(&qp->ndev->pdev->dev,
+                       "RX OK index %u ver %u size %d into buf size %d\n",
+                       qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-       qp->rx_bytes += hdr->len;
-       qp->rx_pkts++;
+               qp->rx_bytes += hdr->len;
+               qp->rx_pkts++;
 
-       entry->index = qp->rx_index;
-       entry->rx_hdr = hdr;
+               entry->len = hdr->len;
 
-       ntb_async_rx(entry, offset, hdr->len);
+               ntb_async_rx(entry, offset);
+       }
 
        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;
 
        return 0;
-
-err:
-       /* FIXME: if this syncrhonous update of the rx_index gets ahead of
-        * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-        * scenarios:
-        *
-        * 1) The peer might miss this update, but observe the update
-        * from the memcpy completion callback.  In this case, the buffer will
-        * not be freed on the peer to be reused for a different packet.  The
-        * successful rx of a later packet would clear the condition, but the
-        * condition could persist if several rx fail in a row.
-        *
-        * 2) The peer may observe this update before the asyncrhonous copy of
-        * prior packets is completed.  The peer may overwrite the buffers of
-        * the prior packets before they are copied.
-        *
-        * 3) Both: the peer may observe the update, and then observe the index
-        * decrement by the asynchronous completion callback.  Who knows what
-        * badness that will cause.
-        */
-       hdr->flags = 0;
-       iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-       return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
                        break;
        }
 
-       if (qp->dma_chan)
+       if (i && qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);
 
        if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
                        goto err1;
 
                entry->qp = qp;
-               ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+               ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
        }
 
@@ -1634,7 +1674,7 @@ err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 err1:
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->dma_chan)
                dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-       struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
        u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        qp->tx_handler = NULL;
        qp->event_handler = NULL;
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-               dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+               kfree(entry);
+       }
+
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
                kfree(entry);
        }
 
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 
-       nt->qp_bitmap_free |= qp_bit;
+       qp->transport->qp_bitmap_free |= qp_bit;
 
        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
        if (!qp || qp->client_ready)
                return NULL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;
 
        buf = entry->cb_data;
        *len = entry->len;
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
        return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        if (!qp)
                return -EINVAL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;
 
        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
+       entry->flags = 0;
+
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+       tasklet_schedule(&qp->rxc_db_work);
 
        return 0;
 }
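
Scheduling the doorbell tasklet on enqueue means buffers posted while data is already pending are serviced immediately instead of waiting for the next doorbell. A hedged client-side sketch of pre-posting receive buffers; NUM_RX_BUFS and RX_BUF_SIZE are assumed constants, not part of this patch:

int i;

for (i = 0; i < NUM_RX_BUFS; i++) {
        void *buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);

        if (!buf)
                break;
        /* cb (here the buffer itself) comes back via rx_handler */
        if (ntb_transport_rx_enqueue(qp, buf, buf, RX_BUF_SIZE))
                kfree(buf);
}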
index e17c539e4f6fbb138c04957532f3a329a8556b4a..2dad7e820ff0b16b7447b708f0c147b2e0289f02 100644 (file)
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
 
        sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
 }
+EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
 
 static struct phy_ops sun4i_usb_phy_ops = {
        .init           = sun4i_usb_phy_init,
index 3510b81db3faabcda59a7148e31a32ce403805a5..08020dc2c7c8c3496987589841fc0ddb0b1c9ff7 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/delay.h>
 #include <linux/phy/omap_control_phy.h>
 #include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define        PLL_STATUS              0x00000004
 #define        PLL_GO                  0x00000008
@@ -52,6 +54,8 @@
 #define        PLL_LOCK                0x2
 #define        PLL_IDLE                0x1
 
+#define SATA_PLL_SOFT_RESET    BIT(18)
+
 /*
  * This is an Empirical value that works, need to confirm the actual
  * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -82,6 +86,9 @@ struct ti_pipe3 {
        struct clk              *refclk;
        struct clk              *div_clk;
        struct pipe3_dpll_map   *dpll_map;
+       struct regmap           *dpll_reset_syscon; /* ctrl. reg. access */
+       unsigned int            dpll_reset_reg; /* reg. index within syscon */
+       bool                    sata_refclk_enabled;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x)
        u32 val;
        unsigned long timeout;
 
-       /* SATA DPLL can't be powered down due to Errata i783 */
-       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+       /* If dpll_reset_syscon is not present we won't power down the SATA
+        * DPLL due to Errata i783.
+        */
+       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
+           !phy->dpll_reset_syscon)
                return 0;
 
        /* PCIe doesn't have internal DPLL */
@@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x)
                }
        }
 
+       /* i783: SATA needs control bit toggle after PLL unlock */
+       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+               regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+                                  SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
+               regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+                                  SATA_PLL_SOFT_RESET, 0);
+       }
+
        ti_pipe3_disable_clocks(phy);
 
        return 0;
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
                }
        } else {
                phy->wkupclk = ERR_PTR(-ENODEV);
+               phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
+                                                       "syscon-pllreset");
+               if (IS_ERR(phy->dpll_reset_syscon)) {
+                       dev_info(&pdev->dev,
+                                "can't get syscon-pllreset, sata dpll won't idle\n");
+                       phy->dpll_reset_syscon = NULL;
+               } else {
+                       if (of_property_read_u32_index(node,
+                                                      "syscon-pllreset", 1,
+                                                      &phy->dpll_reset_reg)) {
+                               dev_err(&pdev->dev,
+                                       "couldn't get pllreset reg. offset\n");
+                               return -EINVAL;
+                       }
+               }
        }
 
        if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, phy);
        pm_runtime_enable(phy->dev);
-       /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
-       if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
-               if (!IS_ERR(phy->refclk))
+
+       /*
+        * Prevent auto-disable of refclk for SATA PHY due to Errata i783
+        */
+       if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+               if (!IS_ERR(phy->refclk)) {
                        clk_prepare_enable(phy->refclk);
+                       phy->sata_refclk_enabled = true;
+               }
+       }
 
        generic_phy = devm_phy_create(phy->dev, NULL, &ops);
        if (IS_ERR(generic_phy))
@@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
 {
        if (!IS_ERR(phy->wkupclk))
                clk_disable_unprepare(phy->wkupclk);
-       if (!IS_ERR(phy->refclk))
+       if (!IS_ERR(phy->refclk)) {
                clk_disable_unprepare(phy->refclk);
+               /*
+                * SATA refclk needs an additional disable as we left it
+                * on in probe to avoid Errata i783
+                */
+               if (phy->sata_refclk_enabled) {
+                       clk_disable_unprepare(phy->refclk);
+                       phy->sata_refclk_enabled = false;
+               }
+       }
+
        if (!IS_ERR(phy->div_clk))
                clk_disable_unprepare(phy->div_clk);
 }
index cb13299195271ffed4854ed6b0b593c5e8487019..3271cd1abe7c0e6c5d2e813171aa5df494f9d977 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig CHROME_PLATFORMS
        bool "Platform support for Chrome hardware"
-       depends on X86 || ARM
        ---help---
          Say Y here to get to see options for platform support for
          various Chromebooks and Chromeboxes. This option alone does
index 98d06d15195806cd915a7ccf23f19447e27854d3..d5cdc4776707b13ad67159c5e0822adf26f392b8 100644 (file)
@@ -2051,9 +2051,49 @@ static int bnx2fc_disable(struct net_device *netdev)
        return rc;
 }
 
+static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
+                                     struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+       struct fc_vport_identifiers vpid;
+       uint i, created = 0;
+
+       if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
+               BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
+               goto done;
+       }
+
+       /* Sanity check the first entry to make sure it's not 0 */
+       if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 &&
+           wwn_to_u64(npiv_tbl->wwpn[0]) == 0) {
+               BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n");
+               goto done;
+       }
+
+       vpid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+       vpid.vport_type = FC_PORTTYPE_NPIV;
+       vpid.disable = false;
+
+       for (i = 0; i < npiv_tbl->count; i++) {
+               vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+               vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
+               scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
+                   "NPIV[%u]:%016llx-%016llx",
+                   created, vpid.port_name, vpid.node_name);
+               if (fc_vport_create(lport->host, 0, &vpid))
+                       created++;
+               else
+                       BNX2FC_HBA_DBG(lport, "Failed to create vport\n");
+       }
+done:
+       return created;
+}
+
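The sanity check and the vport identifiers both fold an 8-byte world-wide name into a u64 with wwn_to_u64(). For reference, a hedged equivalent of that helper (demo_ name added; WWNs are big-endian on the wire):

#include <asm/unaligned.h>

static u64 demo_wwn_to_u64(const u8 wwn[8])
{
        /* fold the 8 big-endian bytes into a host-order u64 */
        return get_unaligned_be64(wwn);
}
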
 static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
 {
        struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+       struct bnx2fc_hba *hba;
+       struct cnic_fc_npiv_tbl npiv_tbl;
+       struct fc_lport *lport;
 
        if (interface->enabled == false) {
                if (!ctlr->lp) {
@@ -2064,6 +2104,32 @@ static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
                        interface->enabled = true;
                }
        }
+
+       /* Create static NPIV ports if any are contained in NVRAM */
+       hba = interface->hba;
+       lport = ctlr->lp;
+
+       if (!hba)
+               goto done;
+
+       if (!hba->cnic)
+               goto done;
+
+       if (!lport)
+               goto done;
+
+       if (!lport->host)
+               goto done;
+
+       if (!hba->cnic->get_fc_npiv_tbl)
+               goto done;
+
+       memset(&npiv_tbl, 0, sizeof(npiv_tbl));
+       if (hba->cnic->get_fc_npiv_tbl(hba->cnic, &npiv_tbl))
+               goto done;
+
+       bnx2fc_npiv_create_vports(lport, &npiv_tbl);
+done:
        return 0;
 }
 
index 882744852aacb822aa3264bf75fe761b2d494c9f..a9aa38903efefeeccf1dff34ad201af7283d70d9 100644 (file)
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 {
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       unsigned int trace_index;
 
-       trace_entry = &ioa_cfg->trace[atomic_add_return
-                       (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+       trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+       trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
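
Replacing the modulo with a mask is correct only because IPR_NUM_TRACE_ENTRIES is a power of two, and it also fixes a subtle wrap bug: once the atomic counter overflows to negative values, a signed % yields a negative remainder and an out-of-bounds index, while the mask always stays in range. A condensed sketch of the pattern:

#define DEMO_ENTRIES    (1 << 8)                /* must be a power of two */
#define DEMO_MASK       (DEMO_ENTRIES - 1)

static unsigned int demo_next_index(atomic_t *counter)
{
        /* stays in [0, DEMO_ENTRIES) even after the counter wraps negative */
        return atomic_add_return(1, counter) & DEMO_MASK;
}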
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 
 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
+       unsigned int hrrq;
+
        if (ioa_cfg->hrrq_num == 1)
-               return 0;
-       else
-               return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+               hrrq = 0;
+       else {
+               hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+               hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+       }
+       return hrrq;
 }
 
 /**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-       unsigned long hrrq_flags;
+       unsigned long lock_flags;
 
        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
                scsi_dma_unmap(scsi_cmd);
 
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
                scsi_cmd->scsi_done(scsi_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
        } else {
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               spin_lock(&ipr_cmd->hrrq->_lock);
                ipr_erp_start(ioa_cfg, ipr_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_unlock(&ipr_cmd->hrrq->_lock);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        }
 }
 
index 73790a1d096902fbda79f39ca7bd5c1bd43a1119..6b97ee45c7b460d0719f99baa011a6e13706aa67 100644 (file)
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
 
 #define IPR_NUM_TRACE_INDEX_BITS       8
 #define IPR_NUM_TRACE_ENTRIES          (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK           (IPR_NUM_TRACE_ENTRIES - 1)
 #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
        char trace_start[8];
 #define IPR_TRACE_START_LABEL                  "trace"
index 1b3a094734522803c7f3fecd39da5c297a1feb43..30f9ef0c0d4f8cea52b182f370a04ed1ba4a9908 100644 (file)
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
        if (resp) {
                resp(sp, fp, arg);
                res = true;
-       } else if (!IS_ERR(fp)) {
-               fc_frame_free(fp);
        }
 
        spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
 
        fc_exch_release(ep);
        return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
        fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
        fc_exch_release(ep);
index c6795941b45d98579cd6117cc5b72f8c5c4d0bf9..2d5909c4685ca63375f5041d960effada171f5fc 100644 (file)
@@ -1039,11 +1039,26 @@ restart:
                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-               if (!fc_fcp_lock_pkt(fsp)) {
+               spin_lock_bh(&fsp->scsi_pkt_lock);
+               if (!(fsp->state & FC_SRB_COMPL)) {
+                       fsp->state |= FC_SRB_COMPL;
+                       /*
+                        * TODO: dropping scsi_pkt_lock and reacquiring it
+                        * around fc_fcp_cleanup_cmd() is required, since
+                        * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
+                        * which can sleep via schedule(). Maybe schedule()
+                        * and the related code should be removed instead of
+                        * unlocking here, to avoid a scheduling-while-atomic
+                        * bug.
+                        */
+                       spin_unlock_bh(&fsp->scsi_pkt_lock);
+
                        fc_fcp_cleanup_cmd(fsp, error);
+
+                       spin_lock_bh(&fsp->scsi_pkt_lock);
                        fc_io_compl(fsp);
-                       fc_fcp_unlock_pkt(fsp);
                }
+               spin_unlock_bh(&fsp->scsi_pkt_lock);
 
                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(&si->scsi_queue_lock, flags);
index 8053f24f03499335112721cd50c2816da40393a6..98d9bb6ff725ff46621a408bdf1f208175e21daf 100644 (file)
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       unsigned long flags;
 
        del_timer_sync(&conn->transport_timer);
 
+       mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->frwd_lock);
        conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
        if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->frwd_lock);
 
-       /*
-        * Block until all in-progress commands for this connection
-        * time out or fail.
-        */
-       for (;;) {
-               spin_lock_irqsave(session->host->host_lock, flags);
-               if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-                       spin_unlock_irqrestore(session->host->host_lock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(session->host->host_lock, flags);
-               msleep_interruptible(500);
-               iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-                                 "host_busy %d host_failed %d\n",
-                                 atomic_read(&session->host->host_busy),
-                                 session->host->host_failed);
-               /*
-                * force eh_abort() to unblock
-                */
-               wake_up(&conn->ehwait);
-       }
-
        /* flush queued up work because we free the connection below */
        iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        if (session->leadconn == conn)
                session->leadconn = NULL;
        spin_unlock_bh(&session->frwd_lock);
+       mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
 }
index 106884a5444e1cb6f61057c29f813c4d713760fa..6457a8a0db9c37ed8892c28effe768a533f2e7f3 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -944,7 +943,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
                            scmd->sdb.length);
                scmd->sdb.table.sgl = &ses->sense_sgl;
                scmd->sc_data_direction = DMA_FROM_DEVICE;
-               scmd->sdb.table.nents = 1;
+               scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
                scmd->cmnd[0] = REQUEST_SENSE;
                scmd->cmnd[4] = scmd->sdb.length;
                scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
        }
 }
 EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- *             formatted sense data buffer
- * @buf:       Where to build sense data
- * @info:      64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
-       if ((buf[0] & 0x7f) == 0x72) {
-               u8 *ucp, len;
-
-               len = buf[7];
-               ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
-               if (!ucp) {
-                       buf[7] = len + 0xa;
-                       ucp = buf + 8 + len;
-               }
-               ucp[0] = 0;
-               ucp[1] = 0xa;
-               ucp[2] = 0x80; /* Valid bit */
-               ucp[3] = 0;
-               put_unaligned_be64(info, &ucp[4]);
-       } else if ((buf[0] & 0x7f) == 0x70) {
-               buf[0] |= 0x80;
-               put_unaligned_be64(info, &buf[3]);
-       }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
index b1a263137a23391a1e19c2589f35fdaf2c4f514f..448ebdaa3d694758dd899b1c06ce379d0676898a 100644 (file)
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 
 static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 {
-       if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+       if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
                return;
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
 }
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 
        if (mq) {
                if (nents <= SCSI_MAX_SG_SEGMENTS) {
-                       sdb->table.nents = nents;
-                       sg_init_table(sdb->table.sgl, sdb->table.nents);
+                       sdb->table.nents = sdb->table.orig_nents = nents;
+                       sg_init_table(sdb->table.sgl, nents);
                        return 0;
                }
                first_chunk = sdb->table.sgl;
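
Keeping orig_nents in step with nents matters because __sg_free_table() walks orig_nents when tearing down a (possibly chained) table, so a hand-built table with a stale orig_nents frees the wrong number of segments. A hedged sketch of the invariant for a single-segment table (the demo_ helper is illustrative):

static void demo_init_single_sg(struct sg_table *table,
                                struct scatterlist *sgl,
                                void *buf, unsigned int len)
{
        table->sgl = sgl;
        /* freeing walks orig_nents, so it must match what we set up */
        table->nents = table->orig_nents = 1;
        sg_init_table(sgl, 1);
        sg_set_buf(sgl, buf, len);
}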
index 3b2fcb4fada0491c4500b42555fdef542b4399d2..a20da8c25b4f960224fb4d772aafea38c57e1656 100644 (file)
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
        max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               max_xfer);
-       blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+       sdkp->disk->queue->limits.max_sectors =
+               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);
index bfa42620a3f607857978895a49451b9177964374..940781183fac4e450503c7b3cd5df6c30bde6d0c 100644 (file)
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
                if (index == das1801hc || index == das1802hc)
                        return board;
                index = das1801hc;
+               break;
        default:
                dev_err(dev->class_dev,
                        "Board model: probe returned 0x%x (unknown, please report)\n",
index 9c934e6d2ea1114094921bd890eb0186a50f0e06..c61add46b4268b722eeaad74a0540a4a2c48f1e8 100644 (file)
@@ -40,7 +40,7 @@
 
 #define DEBUG_SUBSYSTEM D_OTHER
 
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 
 #include "../include/obd_support.h"
 #include "../include/lustre_debug.h"
index b0c8e235b982164bb170b53655ccfc8d01378d6b..69bdc8f29b59f4c1e1cbacabf7563a7ca1d66817 100644 (file)
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
-               if (conf->assoc) {
+       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+           priv->op_mode != NL80211_IFTYPE_AP) {
+               if (conf->assoc && conf->beacon_rate) {
                        CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
                                       conf->sync_tsf);
 
index f5296f53a3d24186e235db3972b6eac75c3a6cdc..a1f66addda8da97eacb5ece261e23f65b26a95b0 100644 (file)
@@ -493,7 +493,7 @@ static void WILC_WFI_mon_setup(struct net_device *dev)
        /* dev->destructor = free_netdev; */
        PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
        ether_setup(dev);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
        dev->type = ARPHRD_IEEE80211_RADIOTAP;
        memset(dev->dev_addr, 0, ETH_ALEN);
 
index cd77a064c772f1bbe897482d2372fe5bc6434328..fd092909a4577a7c4a708516bf3344fee331ad84 100644 (file)
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
-       if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+       if (hdr->flags & ISCSI_FLAG_CMD_READ)
                cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
-       } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+       else
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
index c2e9fea90b4a4bc16a0384d79fa9684c9f4176e0..860e840461778271191ba4ee8f1a4011dba5e78c 100644 (file)
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
                if (!strcmp(t->tf_ops->name, fo->name)) {
                        BUG_ON(atomic_read(&t->tf_access_cnt));
                        list_del(&t->tf_list);
+                       mutex_unlock(&g_tf_lock);
+                       /*
+                        * Wait for any outstanding fabric se_deve_entry->rcu_head
+                        * callbacks to complete post kfree_rcu(), before allowing
+                        * fabric driver unload of TFO->module to proceed.
+                        */
+                       rcu_barrier();
                        kfree(t);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&g_tf_lock);
index 62ea4e8e70a8935398f2a0e86fc44627dfa3368e..be9cefc07407e80ef5dd7dfcbd8a0d025faf97f6 100644 (file)
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
        list_for_each_entry(tb, &backend_list, list) {
                if (tb->ops == ops) {
                        list_del(&tb->list);
+                       mutex_unlock(&backend_mutex);
+                       /*
+                        * Wait for any outstanding backend driver ->rcu_head
+                        * callbacks to complete post TBO->free_device() ->
+                        * call_rcu(), before allowing backend driver module
+                        * unload of target_backend_ops->owner to proceed.
+                        */
+                       rcu_barrier();
                        kfree(tb);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&backend_mutex);
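
Both hunks above apply the same teardown pattern; a condensed sketch under generic placeholder names (not code from the tree):

        /* Unregister-with-RCU pattern: drop the list lock before blocking,
         * flush pending call_rcu()/kfree_rcu() callbacks, then free.  After
         * list_del() the iterator is dead, so return instead of break --
         * a break would fall through to the trailing mutex_unlock() and
         * unlock an already-unlocked mutex. */
        mutex_lock(&g_lock);
        list_for_each_entry(t, &g_list, list) {
                if (t->ops == ops) {
                        list_del(&t->list);
                        mutex_unlock(&g_lock);  /* rcu_barrier() may sleep */
                        rcu_barrier();
                        kfree(t);
                        return;
                }
        }
        mutex_unlock(&g_lock);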
index b5ba1ec3c35476361103d7dca47a1934cdd3289f..f87d4cef6d398c072e953e7eaa6b5d9d5b469d70 100644 (file)
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
        struct se_dev_entry *deve;
        struct se_session *sess = cmd->se_sess;
        struct se_node_acl *nacl;
+       struct scsi_lun slun;
        unsigned char *buf;
        u32 lun_count = 0, offset = 8;
-
-       if (cmd->data_length < 16) {
-               pr_warn("REPORT LUNS allocation length %u too small\n",
-                       cmd->data_length);
-               return TCM_INVALID_CDB_FIELD;
-       }
+       __be32 len;
 
        buf = transport_kmap_data_sg(cmd);
-       if (!buf)
+       if (cmd->data_length && !buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        /*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
-       if (!sess) {
-               int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
-               lun_count = 1;
+       if (!sess)
                goto done;
-       }
+
        nacl = sess->se_node_acl;
 
        rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
                 * See SPC2-R20 7.19.
                 */
                lun_count++;
-               if ((offset + 8) > cmd->data_length)
+               if (offset >= cmd->data_length)
                        continue;
 
-               int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+               int_to_scsilun(deve->mapped_lun, &slun);
+               memcpy(buf + offset, &slun,
+                      min(8u, cmd->data_length - offset));
                offset += 8;
        }
        rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * See SPC3 r07, page 159.
         */
 done:
-       lun_count *= 8;
-       buf[0] = ((lun_count >> 24) & 0xff);
-       buf[1] = ((lun_count >> 16) & 0xff);
-       buf[2] = ((lun_count >> 8) & 0xff);
-       buf[3] = (lun_count & 0xff);
-       transport_kunmap_data_sg(cmd);
+       /*
+        * If no LUNs are accessible, report virtual LUN 0.
+        */
+       if (lun_count == 0) {
+               int_to_scsilun(0, &slun);
+               if (cmd->data_length > 8)
+                       memcpy(buf + offset, &slun,
+                              min(8u, cmd->data_length - offset));
+               lun_count = 1;
+       }
+
+       if (buf) {
+               len = cpu_to_be32(lun_count * 8);
+               memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
        return 0;
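
A condensed restatement of the SPC-3 REPORT LUNS layout the rewrite above implements ('mapped_lun' and 'i' stand in for the loop state; this is a sketch, not code from the patch):

        /* Parameter data: an 8-byte header, then 8-byte LUN entries:
         *   bytes 0..3  LUN LIST LENGTH (big endian) = lun_count * 8
         *   bytes 4..7  reserved
         *   bytes 8..   one struct scsi_lun per reported LUN */
        __be32 len = cpu_to_be32(lun_count * 8);
        memcpy(buf, &len, 4);                   /* header    */
        int_to_scsilun(mapped_lun, &slun);      /* one entry */
        memcpy(buf + 8 + i * 8, &slun, 8);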
index 6509c61b96484993333a4198138945f43154fdfd..620dcd405ff6eec9ae65b0af8e93c25daae15d1f 100644 (file)
@@ -68,7 +68,7 @@ struct power_table {
  *     registered cooling device.
  * @cpufreq_state: integer value representing the current state of cpufreq
  *     cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
  *     frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *     cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
-       unsigned int cpufreq_val;
+       unsigned int clipped_freq;
        unsigned int max_level;
        unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       mutex_lock(&cooling_cpufreq_lock);
+       mutex_lock(&cooling_list_lock);
        list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
                if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
-                       mutex_unlock(&cooling_cpufreq_lock);
+                       mutex_unlock(&cooling_list_lock);
                        return get_level(cpufreq_dev, freq);
                }
        }
-       mutex_unlock(&cooling_cpufreq_lock);
+       mutex_unlock(&cooling_list_lock);
 
        pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
        return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                    unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       unsigned long max_freq = 0;
+       unsigned long clipped_freq;
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       switch (event) {
+       if (event != CPUFREQ_ADJUST)
+               return NOTIFY_DONE;
 
-       case CPUFREQ_ADJUST:
-               mutex_lock(&cooling_cpufreq_lock);
-               list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-                       if (!cpumask_test_cpu(policy->cpu,
-                                             &cpufreq_dev->allowed_cpus))
-                               continue;
+       mutex_lock(&cooling_list_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+                       continue;
 
-                       max_freq = cpufreq_dev->cpufreq_val;
+               /*
+                * policy->max is the maximum allowed frequency defined by the user
+                * and clipped_freq is the maximum that thermal constraints
+                * allow.
+                *
+                * If clipped_freq is lower than policy->max, then we need to
+                * readjust policy->max.
+                *
+                * But, if clipped_freq is greater than policy->max, we don't
+                * need to do anything.
+                */
+               clipped_freq = cpufreq_dev->clipped_freq;
 
-                       if (policy->max != max_freq)
-                               cpufreq_verify_within_limits(policy, 0,
-                                                            max_freq);
-               }
-               mutex_unlock(&cooling_cpufreq_lock);
+               if (policy->max > clipped_freq)
+                       cpufreq_verify_within_limits(policy, 0, clipped_freq);
                break;
-       default:
-               return NOTIFY_DONE;
        }
+       mutex_unlock(&cooling_list_lock);
 
        return NOTIFY_OK;
 }
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        clip_freq = cpufreq_device->freq_table[state];
        cpufreq_device->cpufreq_state = state;
-       cpufreq_device->cpufreq_val = clip_freq;
+       cpufreq_device->clipped_freq = clip_freq;
 
        cpufreq_update_policy(cpu);
 
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
 
-       cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+       cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
 
        mutex_lock(&cooling_cpufreq_lock);
 
+       mutex_lock(&cooling_list_lock);
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+       mutex_unlock(&cooling_list_lock);
+
        /* Register the notifier for first cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       if (!cpufreq_dev_count++)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
        mutex_unlock(&cooling_cpufreq_lock);
 
        return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
                return;
 
        cpufreq_dev = cdev->devdata;
-       mutex_lock(&cooling_cpufreq_lock);
-       list_del(&cpufreq_dev->node);
 
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       mutex_lock(&cooling_cpufreq_lock);
+       if (!--cpufreq_dev_count)
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
+
+       mutex_lock(&cooling_list_lock);
+       list_del(&cpufreq_dev->node);
+       mutex_unlock(&cooling_list_lock);
+
        mutex_unlock(&cooling_cpufreq_lock);
 
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
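
Worked numbers for the clipped_freq test introduced above (invented for illustration): suppose thermal throttling has clipped a CPU to 1.2 GHz while the user policy allows 1.8 GHz.

        unsigned long clipped_freq = 1200000;   /* kHz, set by set_cur_state() */

        /* policy->max == 1800000 > clipped_freq, so the notifier narrows
         * the policy down to the thermal limit: */
        cpufreq_verify_within_limits(policy, 0, clipped_freq);

        /* Had clipped_freq been 2000000 (above policy->max), the test would
         * fail and the user's policy would be left untouched. */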
index d5dd357ba57c4c6af785b4f915d9fccba6a4fa21..b49f97c734d00ddccb50379d3131c232651e96e5 100644 (file)
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
 static struct platform_driver hisi_thermal_driver = {
        .driver = {
                .name           = "hisi_thermal",
-               .owner          = THIS_MODULE,
                .pm             = &hisi_thermal_pm_ops,
                .of_match_table = of_hisi_thermal_match,
        },
index 4672250b329f4cec54ab243b55b41b127b1c48d0..7006860f2f3693b04ee44996c5085e2f94ceae44 100644 (file)
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
        struct thermal_instance *instance;
        struct power_allocator_params *params = tz->governor_data;
        u32 *req_power, *max_power, *granted_power, *extra_actor_power;
-       u32 total_req_power, max_allocatable_power;
+       u32 *weighted_req_power;
+       u32 total_req_power, max_allocatable_power, total_weighted_req_power;
        u32 total_granted_power, power_range;
        int i, num_actors, total_weight, ret = 0;
        int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
        }
 
        /*
-        * We need to allocate three arrays of the same size:
-        * req_power, max_power and granted_power.  They are going to
-        * be needed until this function returns.  Allocate them all
-        * in one go to simplify the allocation and deallocation
-        * logic.
+        * We need to allocate five arrays of the same size:
+        * req_power, max_power, granted_power, extra_actor_power and
+        * weighted_req_power.  They are going to be needed until this
+        * function returns.  Allocate them all in one go to simplify
+        * the allocation and deallocation logic.
         */
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
-       req_power = devm_kcalloc(&tz->device, num_actors * 4,
+       BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+       req_power = devm_kcalloc(&tz->device, num_actors * 5,
                                 sizeof(*req_power), GFP_KERNEL);
        if (!req_power) {
                ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
        max_power = &req_power[num_actors];
        granted_power = &req_power[2 * num_actors];
        extra_actor_power = &req_power[3 * num_actors];
+       weighted_req_power = &req_power[4 * num_actors];
 
        i = 0;
+       total_weighted_req_power = 0;
        total_req_power = 0;
        max_allocatable_power = 0;
 
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
                else
                        weight = instance->weight;
 
-               req_power[i] = frac_to_int(weight * req_power[i]);
+               weighted_req_power[i] = frac_to_int(weight * req_power[i]);
 
                if (power_actor_get_max_power(cdev, tz, &max_power[i]))
                        continue;
 
                total_req_power += req_power[i];
                max_allocatable_power += max_power[i];
+               total_weighted_req_power += weighted_req_power[i];
 
                i++;
        }
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
        power_range = pid_controller(tz, current_temp, control_temp,
                                     max_allocatable_power);
 
-       divvy_up_power(req_power, max_power, num_actors, total_req_power,
-                      power_range, granted_power, extra_actor_power);
+       divvy_up_power(weighted_req_power, max_power, num_actors,
+                      total_weighted_req_power, power_range, granted_power,
+                      extra_actor_power);
 
        total_granted_power = 0;
        i = 0;
@@ -328,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
                                      max_allocatable_power, current_temp,
                                      (s32)control_temp - (s32)current_temp);
 
-       devm_kfree(&tz->device, req_power);
+       kfree(req_power);
 unlock:
        mutex_unlock(&tz->lock);
 
@@ -420,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
                return -EINVAL;
        }
 
-       params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;
 
@@ -462,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
        return 0;
 
 free:
-       devm_kfree(&tz->device, params);
+       kfree(params);
        return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
        dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
-       devm_kfree(&tz->device, tz->governor_data);
+       kfree(tz->governor_data);
        tz->governor_data = NULL;
 }
 
index c8e35c1a43dcfd19145a6d1e24b132b25b5c6169..e0da3865e0600f8f23b6368be63fd662d5dda6d1 100644 (file)
@@ -1,6 +1,6 @@
 config EXYNOS_THERMAL
        tristate "Exynos thermal management unit driver"
-       depends on OF
+       depends on THERMAL_OF
        help
          If you say yes here you get support for the TMU (Thermal Management
          Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
index 531f4b179871f63da7fea6f32af19b27af18e9a3..c96ff10b869efd941bfe8c32384d48b70b1c348d 100644 (file)
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
 
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
-       struct exynos_tmu_platform_data *pdata;
        struct exynos_tmu_data *data;
        int ret;
 
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        if (ret)
                goto err_sensor;
 
-       pdata = data->pdata;
-
        INIT_WORK(&data->irq_work, exynos_tmu_work);
 
        data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
 err_sensor:
+       if (!IS_ERR_OR_NULL(data->regulator))
+               regulator_disable(data->regulator);
        thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
 
        return ret;
index 04659bfb888b73237257b75f944bf9130bc241b6..4ca211be4c0f197825be94f70be386af1c2cc33d 100644 (file)
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
        return -ENODEV;
 
 unbind:
+       device_remove_file(&tz->device, &pos->weight_attr);
        device_remove_file(&tz->device, &pos->attr);
        sysfs_remove_link(&tz->device.kobj, pos->name);
        release_idr(&tz->idr, &tz->lock, pos->id);
index 74fea4fa41b156248ce6d7116db02b54990066a9..3ad48e1c0c57e1722311c8393ad3bd21712fa1e7 100644 (file)
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
        },
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+       ci_hdrc_host_driver_init();
+       return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+       platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
index 6cf87b8b13a8a606b5ccf680e4635fcbc44874a1..7161439def19aa265c9f36530d3d97d63ecc51a7 100644 (file)
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
        rdrv->name      = "host";
        ci->roles[CI_ROLE_HOST] = rdrv;
 
+       return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
        ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
        orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
        ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-       return 0;
 }
index 5707bf379bfb4b7ce98ff4a79d585619d1c64b48..0f12f131bdd3f22671eaf170476e2511950fa1be 100644 (file)
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static inline void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
index f7f35a36c09a06eab17ef2e5af013ee3de2b5b8e..6df9715a4bcd31179cb190100df45b2dd975a0d2 100644 (file)
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
        int ret;
 
        ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+       if (ret >= HIDG_MINORS) {
+               ida_simple_remove(&hidg_ida, ret);
+               ret = -ENODEV;
+       }
 
        return ret;
 }
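
The same bounded-allocation fix is applied to f_printer below. A generic sketch of the pattern (MY_MINORS and the ida name are placeholders): ida_simple_get() is given no upper bound here, so the result must be checked against the minor region the driver actually registered.

        static DEFINE_IDA(my_ida);
        #define MY_MINORS 4

        static int my_get_minor(void)
        {
                int ret = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);

                if (ret >= MY_MINORS) {         /* beyond our char-dev region */
                        ida_simple_remove(&my_ida, ret);
                        ret = -ENODEV;
                }
                return ret;
        }

Passing the bound as ida_simple_get()'s 'end' argument would arguably be tidier; the post-check keeps the change minimal.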
index 44173df272739543a6b3168323073dba45239760..357f63f47b42aba69d92e24346963645227d39e2 100644 (file)
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
 
 static inline int gprinter_get_minor(void)
 {
-       return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+       int ret;
+
+       ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+       if (ret >= PRINTER_MINORS) {
+               ida_simple_remove(&printer_ida, ret);
+               ret = -ENODEV;
+       }
+
+       return ret;
 }
 
 static inline void gprinter_put_minor(int minor)
index 6d3eb8b00a488446db954334e80ac12eccf0d5cf..53186154725330d4c1f710e4829d4a8b25b614cd 100644 (file)
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
                        factor = 1000;
                } else {
                        ep_desc = &hs_epin_desc;
-                       factor = 125;
+                       factor = 8000;
                }
 
                /* pre-compute some values for iso_complete() */
                uac2->p_framesize = opts->p_ssize *
                                    num_channels(opts->p_chmask);
                rate = opts->p_srate * uac2->p_framesize;
-               uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+               uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
                uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
                                        prm->max_psize);
 
index b04980cf6dc42108f4861e4dfa7285dd4fe56af9..1efa61265d8d49c5116027c8e3555ae70b9cc12c 100644 (file)
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
        /* The current hw dequeue pointer */
        tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
        deq_ptr_64 = tmp_32;
-       tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+       tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
        deq_ptr_64 |= ((u64)tmp_32 << 32);
 
        /* we have the dma addr of next bd that will be fetched by hardware */
index 362ee8af5fce87df4a2e7779f743c2ec722d9a5c..89ed5e71a1991e0cd249c48b18bd04dd67bacf91 100644 (file)
@@ -323,6 +323,7 @@ err4:
 
 err3:
        put_device(&udc->dev);
+       device_del(&gadget->dev);
 
 err2:
        put_device(&gadget->dev);
index 3e442f77a2b9367c5bd90d2ab69beb2db841fd26..9a8c936cd42c18ef72a695d45c7e4ce2e893f8b0 100644 (file)
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int size;
        int i, j, num_ports;
 
-       del_timer_sync(&xhci->cmd_timer);
+       if (timer_pending(&xhci->cmd_timer))
+               del_timer_sync(&xhci->cmd_timer);
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
index 6a8fc52aed5863391885ac11c2661a407f707a30..32f4d564494a9f48cfebd328e61d3c281387d252 100644 (file)
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
-       if (segment_offset > TRBS_PER_SEGMENT)
+       if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
 }
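
A short bounds note on the off-by-one fixed above: seg->trbs holds TRBS_PER_SEGMENT entries, so the valid offsets are 0 .. TRBS_PER_SEGMENT - 1.

        segment_offset = trb - seg->trbs;       /* == TRBS_PER_SEGMENT when trb
                                                 * points one past the segment */
        if (segment_offset >= TRBS_PER_SEGMENT) /* the old '>' let that value
                                                 * yield a bogus DMA address */
                return 0;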
index 19b85ee98a7247c46089e023676633e70eb498df..876423b8892c96f80930fa7a103f36370e519dfe 100644 (file)
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
          .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+       { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+         .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index 9c63897b3a564012ea63f99b9e5e73bc48b93d36..d156545728c2ab5b8bb81cf7537aa8a60805c08a 100644 (file)
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x901c)},   /* Sierra Wireless EM7700 */
        {DEVICE_SWI(0x1199, 0x901f)},   /* Sierra Wireless EM7355 */
        {DEVICE_SWI(0x1199, 0x9040)},   /* Sierra Wireless Modem */
-       {DEVICE_SWI(0x1199, 0x9041)},   /* Sierra Wireless MC7305/MC7355 */
        {DEVICE_SWI(0x1199, 0x9051)},   /* Netgear AirCard 340U */
        {DEVICE_SWI(0x1199, 0x9053)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9054)},   /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a8)},   /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 46179a0828ebcbad9a78c11dad8044edff27664a..07d1ecd564f79d9c51798f6941b1295d794d468c 100644 (file)
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
index 658c34bb9076f813058dfb03a47907ca48c6eb0a..1aaf89300621abc811f57f549c25b9a540a21d99 100644 (file)
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
        int y;
        int c = scr_readw((u16 *) vc->vc_pos);
 
+       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
        if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
                return;
 
-       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
        if (vc->vc_cursor_type & 0x10)
                fbcon_del_cursor_timer(info);
        else
index 2d98de535e0f7374804474c58de752ffb2848aa4..f888561568d91735a3a765a772630e8b2be0a54a 100644 (file)
@@ -298,7 +298,7 @@ config FB_ARMCLCD
 
 # Helper logic selected only by the ARM Versatile platform family.
 config PLAT_VERSATILE_CLCD
-       def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+       def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
        depends on ARM
        depends on FB_ARMCLCD && FB=y
 
index 928ee639c0c19ba3a2346737c41b44f38e82c0b5..bf407b6ba15ca0002166a0704124eeda0fb2052e 100644 (file)
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
                        }
                        prev = port;
                } while (of_node_cmp(port->name, "port") != 0);
+
+               of_node_put(ports);
        }
 
        return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
        if (!port)
                return NULL;
 
-       np = of_get_next_parent(port);
+       np = of_get_parent(port);
 
        for (i = 0; i < 2 && np; ++i) {
                struct property *prop;
index 86bd457d039d2ad9a85e82d3f26a3630e3134da7..50bce45e7f3d47d78163058eff366b32d4f56180 100644 (file)
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
                goto err_free_dma;
        }
 
-       ret = clk_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable clock\n");
                goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
        misc_deregister(&priv->misc_dev);
 
 err_disable_clk:
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return ret;
 }
index 111c2d1911d32ea38e86b11c0af753133ccfab05..b5102aa6090d111e25727f78422c8cbc183f086a 100644 (file)
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
                index = disp->native_mode;
 
        ret = videomode_from_timings(disp, vm, index);
-       if (ret)
-               return ret;
 
        display_timings_release(disp);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(of_get_videomode);
index 60e2a16775637b778a8faeb8f2b3f84810a88716..c96944b59856c10c7d28c189b5ff86dbe4b3c932 100644 (file)
@@ -313,6 +313,7 @@ err_init_vq:
 static void virtinput_remove(struct virtio_device *vdev)
 {
        struct virtio_input *vi = vdev->priv;
+       void *buf;
        unsigned long flags;
 
        spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
        spin_unlock_irqrestore(&vi->lock, flags);
 
        input_unregister_device(vi->idev);
+       vdev->config->reset(vdev);
+       while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+               kfree(buf);
        vdev->config->del_vqs(vdev);
        kfree(vi);
 }
index fd933695f2328f29c2493ee751f22230ec68cbb1..bf4a23c7c5918f6849e764a8376c3608cc591933 100644 (file)
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item, it is guaranteed that only a single instance of it runs at any time.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
        enum bp_state state = BP_DONE;
        long credit;
 
-       mutex_lock(&balloon_mutex);
 
        do {
+               mutex_lock(&balloon_mutex);
+
                credit = current_credit();
 
                if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
 
                state = update_schedule(state);
 
-#ifndef CONFIG_PREEMPT
-               if (need_resched())
-                       schedule();
-#endif
+               mutex_unlock(&balloon_mutex);
+
+               cond_resched();
+
        } while (credit && state == BP_DONE);
 
        /* Schedule more work if there is some still to be done. */
        if (state == BP_EAGAIN)
                schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
-       mutex_unlock(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
index 67b9163db7185402b0ff3811c5363c1a1022e2c7..0dbb222daaf1c694b1f073f3e206f755f5f77cc6 100644 (file)
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
        pr_debug("priv %p\n", priv);
 
+       mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));
+       mutex_unlock(&priv->lock);
 
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
index 9ad327238ba931243967455b5790916dc6b184f1..e30353575d5da11f75e8c927ba53945a23b73d76 100644 (file)
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 
        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               addrs);
-       if (!rv)
+       if (!rv) {
                vunmap(vaddr);
+               free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+       }
        else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
                     node->nr_handles);
index e9ace099162ce14d73d04523962465ee163b30c7..8a820295657686d634e3a5ded2ee21b5dcefc026 100644 (file)
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
                                /* Exclusive -> exclusive, nothing changed */
                        }
                }
+
+               /* For exclusive extent, free its reserved bytes too */
+               if (nr_old_roots == 0 && nr_new_roots == 1 &&
+                   cur_new_count == nr_new_roots)
+                       qg->reserved -= num_bytes;
                if (dirty)
                        qgroup_dirty(fs_info, qg);
        }
index dc10c9dd36c1a2ac6264ed21d3248e5f62f1e330..ddd5e94712904501db729c51b59de72cd88ddb5a 100644 (file)
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
 
        swap(cf, ci->i_prealloc_cap_flush);
        cf->caps = flushing;
-       cf->kick = false;
 
        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
 
 static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
                                struct ceph_mds_session *session,
-                               struct ceph_inode_info *ci,
-                               bool kick_all)
+                               struct ceph_inode_info *ci)
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
                for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
                        cf = rb_entry(n, struct ceph_cap_flush, i_node);
-                       if (cf->tid < first_tid)
-                               continue;
-                       if (kick_all || cf->kick)
+                       if (cf->tid >= first_tid)
                                break;
                }
                if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
                }
 
                cf = rb_entry(n, struct ceph_cap_flush, i_node);
-               cf->kick = false;
 
                first_tid = cf->tid + 1;
 
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
 {
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
-       struct ceph_cap_flush *cf;
-       struct rb_node *n;
 
        dout("early_kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
                if ((cap->issued & ci->i_flushing_caps) !=
                    ci->i_flushing_caps) {
                        spin_unlock(&ci->i_ceph_lock);
-                       if (!__kick_flushing_caps(mdsc, session, ci, true))
+                       if (!__kick_flushing_caps(mdsc, session, ci))
                                continue;
                        spin_lock(&ci->i_ceph_lock);
                }
 
-               for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
-                       cf = rb_entry(n, struct ceph_cap_flush, i_node);
-                       cf->kick = true;
-               }
-
                spin_unlock(&ci->i_ceph_lock);
        }
 }
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
 
        dout("kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
-               int delayed = __kick_flushing_caps(mdsc, session, ci, false);
+               int delayed = __kick_flushing_caps(mdsc, session, ci);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
 
                spin_unlock(&ci->i_ceph_lock);
 
-               delayed = __kick_flushing_caps(mdsc, session, ci, true);
+               delayed = __kick_flushing_caps(mdsc, session, ci);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
index 4347039ecc183d538c23f32019e5213da2ebf2f4..6706bde9ad1b1e16e6a283a83ea93c4f58b6b2aa 100644 (file)
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                return 0;
 
        spin_lock(&ctx->flc_lock);
-       list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+       list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
index 860cc016e70d4ff463c1f7845fc648eaf58269c4..2f2460d23a0600f8f9bf2e1cc4fe3b2286684356 100644 (file)
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 struct ceph_cap_flush {
        u64 tid;
        int caps;
-       bool kick;
        struct rb_node g_node; // global
        union {
                struct rb_node i_node; // inode
index 5c8ea15e73a53b6b6dbe3e9660973d2eda9c7800..9b5fe503f6cb6c8d76044bb8272084fdb0474c9b 100644 (file)
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
        inode_init_early();
 }
 
-void __init vfs_caches_init(unsigned long mempages)
+void __init vfs_caches_init(void)
 {
-       unsigned long reserve;
-
-       /* Base hash sizes on available memory, with a reserve equal to
-           150% of current kernel size */
-
-       reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
-       mempages -= reserve;
-
        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        dcache_init();
        inode_init();
-       files_init(mempages);
+       files_init();
+       files_maxfiles_init();
        mnt_init();
        bdev_cache_init();
        chrdev_init();
index 7f9d407c759596f950335bd418ab0226f4a629f8..ad17e05ebf95f07888b15f2b09a7b37ac89b9710 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
+#include <linux/swap.h>
 
 #include <linux/atomic.h>
 
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
        }
 }
 
-void __init files_init(unsigned long mempages)
+void __init files_init(void)
 { 
-       unsigned long n;
-
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+       percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+}
 
-       /*
-        * One file with associated inode and dcache is very roughly 1K.
-        * Per default don't use more than 10% of our memory for files. 
-        */ 
+/*
+ * One file with associated inode and dcache is very roughly 1K. Per default
+ * do not use more than 10% of our memory for files.
+ */
+void __init files_maxfiles_init(void)
+{
+       unsigned long n;
+       unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+
+       memreserve = min(memreserve, totalram_pages - 1);
+       n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
-       n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-       percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 } 
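
Illustrative arithmetic for the new sizing (a hypothetical 4 GiB machine with 4 KiB pages; numbers invented for the example):

        /* totalram_pages  = 1048576 pages
         * nr_free_pages() =  900000  =>  148576 pages in use
         * memreserve      = 148576 * 3/2 = 222864 pages
         * n = (1048576 - 222864) * (4096 / 1024) / 10 ~= 330284
         * so max_files lands around 330k, derived from live memory state
         * rather than the boot-time 'mempages' snapshot. */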
index 80cc1b35d46043c16bc456e0cadf61e76c281d52..ebb5e37455a07acd86f5fbf1b76d474e99b937fb 100644 (file)
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 
                        err = -EINVAL;
                        if (old) {
-                               struct fuse_dev *fud = fuse_get_dev(old);
+                               struct fuse_dev *fud = NULL;
+
+                               /*
+                                * Check against file->f_op because CUSE
+                                * uses the same ioctl handler.
+                                */
+                               if (old->f_op == file->f_op &&
+                                   old->f_cred->user_ns == file->f_cred->user_ns)
+                                       fud = fuse_get_dev(old);
 
                                if (fud) {
                                        mutex_lock(&fuse_mutex);
index 0cf74df68617b8738342a5f7be7992ccc596bf0a..973c24ce59ad3ef1b62ff3ce00113d7d85cedb68 100644 (file)
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;
+       if (creat_flags == HUGETLB_SHMFS_INODE)
+               inode->i_flags |= S_PRIVATE;
 
        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
index ae4e4c18b2ac0b2c366f893f7ffa4d898446c0f7..1c2105ed20c5ef4fb390878fb5442943ceec29ae 100644 (file)
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
                return 0;
 
        /* Allowed if parent directory not sticky and world-writable. */
-       parent = nd->path.dentry->d_inode;
+       parent = nd->inode;
        if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
                return 0;
 
@@ -1954,8 +1954,13 @@ OK:
                                continue;
                        }
                }
-               if (unlikely(!d_can_lookup(nd->path.dentry)))
+               if (unlikely(!d_can_lookup(nd->path.dentry))) {
+                       if (nd->flags & LOOKUP_RCU) {
+                               if (unlazy_walk(nd, NULL, 0))
+                                       return -ECHILD;
+                       }
                        return -ENOTDIR;
+               }
        }
 }
 
index 6904213a436368e47628af85701a3beb68e0550b..ebf90e487c752b59270aa559588a8805468ad0f5 100644 (file)
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        BUG_ON(!ls->ls_file);
 
        if (nfsd4_layout_setlease(ls)) {
+               fput(ls->ls_file);
                put_nfs4_file(fp);
                kmem_cache_free(nfs4_layout_stateid_cache, ls);
                return NULL;
index 61dfb33f05593c1b19dff8a5346dee37a3539d79..95202719a1fd26bd27ea71a2fe85ec1c248e8d13 100644 (file)
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
        queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
 {
-       if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+       if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
                return nfserr_bad_stateid;
        return nfs_ok;
 }
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
 {
        __be32 status;
 
-       status = nfs4_check_fh(fhp, ols);
-       if (status)
-               return status;
        status = nfsd4_check_openowner_confirmed(ols);
        if (status)
                return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                status = nfserr_bad_stateid;
                break;
        }
+       if (status)
+               goto out;
+       status = nfs4_check_fh(fhp, s);
 
 done:
        if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status)
                return status;
-       return nfs4_check_fh(current_fh, stp);
+       return nfs4_check_fh(current_fh, &stp->st_stid);
 }
 
 /* 
index 54633858733a8da5ac978fe1d19a0a055488e01d..75e0563c09d1911d927501ee52b53a3bd988940e 100644 (file)
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
                              FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 { return 0; }
 #endif
 
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
 {
        /* As per referral draft:  */
        if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
        }
        *bmval0 &= WORD0_ABSENT_FS_ATTRS;
        *bmval1 &= WORD1_ABSENT_FS_ATTRS;
+       *bmval2 &= WORD2_ABSENT_FS_ATTRS;
        return 0;
 }
 
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
 
        if (exp->ex_fslocs.migrated) {
-               BUG_ON(bmval[2]);
-               status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+               status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
                if (status)
                        goto out;
        }
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        }
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-       if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-                       bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+       if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+            bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
                err = security_inode_getsecctx(d_inode(dentry),
                                                &context, &contextlen);
                contextsupport = (err == 0);
index 92e48c70f0f05542a75804fe7aac752672abd214..39ddcaf0918f145fb3f2cb916d27aa1b866a220e 100644 (file)
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
                                         unsigned int flags)
 {
        struct fsnotify_mark *lmark, *mark;
+       LIST_HEAD(to_free);
 
+       /*
+        * We have to be really careful here. Anytime we drop mark_mutex,
+        * e.g. fsnotify_clear_marks_by_inode() can come in and free marks,
+        * even ones already on our to_free list, so mark_mutex must be held
+        * whenever that list is accessed. And freeing a mark requires us to
+        * drop mark_mutex. So we can reliably free only the first mark in
+        * the list. That's why we first move the marks to be freed onto
+        * to_free in one go and then free them one by one.
+        */
        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-               if (mark->flags & flags) {
-                       fsnotify_get_mark(mark);
-                       fsnotify_destroy_mark_locked(mark, group);
-                       fsnotify_put_mark(mark);
-               }
+               if (mark->flags & flags)
+                       list_move(&mark->g_list, &to_free);
        }
        mutex_unlock(&group->mark_mutex);
+
+       while (1) {
+               mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+               if (list_empty(&to_free)) {
+                       mutex_unlock(&group->mark_mutex);
+                       break;
+               }
+               mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+               fsnotify_get_mark(mark);
+               fsnotify_destroy_mark_locked(mark, group);
+               mutex_unlock(&group->mark_mutex);
+               fsnotify_put_mark(mark);
+       }
 }
 
 /*
index 1a35c6139656344516aacd59c7120f6fb877f2a2..0f5fd9db8194ef5d135f1896f6e2645a5f059cd8 100644 (file)
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
 
        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                u64 s = i_size_read(inode);
-               sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+               sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
                        (do_div(s, osb->s_clustersize) >> 9);
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev,
-                               p_cpos << (osb->s_clustersize_bits - 9),
+                               (u64)p_cpos << (osb->s_clustersize_bits - 9),
                                zero_len_head >> 9, GFP_NOFS, false);
                if (ret < 0)
                        mlog_errno(ret);
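
Why the (u64) casts above matter, with invented numbers: p_cpos is a 32-bit cluster number. Assuming 1 MiB clusters, s_clustersize_bits - 9 == 11, so:

        /* p_cpos << 11 overflows u32 once p_cpos >= 2^21, i.e. for any
         * cluster past the 2 TiB mark, and blkdev_issue_zeroout() was then
         * handed a truncated sector number.  Widening first is the fix: */
        sector_t sector = (u64)p_cpos << (osb->s_clustersize_bits - 9);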
index 8b23aa2f52ddafe31be83b730d7219693222a1c5..23157e40dd740204bc10f9eaeb55ec08f2f0dfb4 100644 (file)
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
        osb->dc_work_sequence = osb->dc_wake_sequence;
 
        processed = osb->blocked_lock_count;
-       while (processed) {
-               BUG_ON(list_empty(&osb->blocked_lock_list));
-
+       /*
+        * Blocked lock processing in this loop might call iput which can
+        * remove items off osb->blocked_lock_list. Downconvert up to
+        * 'processed' locks, but stop early if some were removed by
+        * ocfs2_mark_lockres_freeing() while we were downconverting.
+        */
+       while (processed && !list_empty(&osb->blocked_lock_list)) {
                lockres = list_entry(osb->blocked_lock_list.next,
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
index 7e412ad748363489baad12cbb644b8074d78cfeb..270221fcef42cc42fcfdbc098b587b571be65a12 100644 (file)
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (kinfo->si_code == BUS_MCEERR_AR ||
-                   kinfo->si_code == BUS_MCEERR_AO)
+               if (kinfo->si_signo == SIGBUS &&
+                   (kinfo->si_code == BUS_MCEERR_AR ||
+                    kinfo->si_code == BUS_MCEERR_AO))
                        err |= __put_user((short) kinfo->si_addr_lsb,
                                          &uinfo->ssi_addr_lsb);
 #endif
index 48db6a56975f5ebc874d19054828b70a5f4aaa15..5aa519711e0b6fcb1212a517e50be5afac8ed7d7 100644 (file)
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
        struct timer_list disable_timer;                /* delayed disable timer */
 
        /* vblank counter, protected by dev->vblank_time_lock for writes */
-       unsigned long count;
+       u32 count;
        /* vblank timestamps, protected by dev->vblank_time_lock for writes */
        struct timeval time[DRM_VBLANKTIME_RBSIZE];
 
index 57ca8cc383a615344498202384b1b814911bc766..3b4d8a4a23fb760867fc7d59ede2a3459eac2375 100644 (file)
@@ -743,8 +743,6 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
-
-       struct list_head destroy_list;
 };
 
 /**
index c8fc187061de5fbd9fc8545f602a62baaa45b8cc..918aa68b5199d54501a2a9d68404e44388e9e04e 100644 (file)
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
  * @get_modes: get mode list for this connector
  * @mode_valid: is this mode valid on the given connector? (optional)
  * @best_encoder: return the preferred encoder for this connector
+ * @atomic_best_encoder: atomic version of @best_encoder
  *
  * The helper operations are called by the mid-layer CRTC helper.
  */
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
        enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
                                           struct drm_display_mode *mode);
        struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+       struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+                                                  struct drm_connector_state *connector_state);
 };
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
index 45c39a37f9249562761dc9615ffecf12ec194846..8bc073d297db2a233cf389d6c0656dec78c0445a 100644 (file)
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 6c78956aa47092440edb3a73e7b9389ac3a57558..d2992bfa17063a052a08a106b9105284ce4bfa4a 100644 (file)
@@ -385,8 +385,6 @@ enum {
        SATA_SSP                = 0x06, /* Software Settings Preservation */
        SATA_DEVSLP             = 0x09, /* Device Sleep */
 
-       SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
        /* feature values for SET_MAX */
        ATA_SET_MAX_ADDR        = 0x00,
        ATA_SET_MAX_PASSWD      = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)    (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)      ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)  ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
-                               ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
        return false;
 }
 
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
 /**
  *     ata_id_major_version    -       get ATA level of drive
  *     @id: Identify data
index c6028fd742c1a18309a323a016e3be076d9b13be..d04aa58280ded5694bbde3638ec602269635a988 100644 (file)
@@ -3,28 +3,43 @@
 
 /* Exponentially weighted moving average (EWMA) */
 
-/* For more documentation see lib/average.c */
-
-struct ewma {
-       unsigned long internal;
-       unsigned long factor;
-       unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
-                     unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
-       return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight)                           \
+       struct ewma_##name {                                            \
+               unsigned long internal;                                 \
+       };                                                              \
+       static inline void ewma_##name##_init(struct ewma_##name *e)    \
+       {                                                               \
+               BUILD_BUG_ON(!__builtin_constant_p(_factor));           \
+               BUILD_BUG_ON(!__builtin_constant_p(_weight));           \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+               e->internal = 0;                                        \
+       }                                                               \
+       static inline unsigned long                                     \
+       ewma_##name##_read(struct ewma_##name *e)                       \
+       {                                                               \
+               BUILD_BUG_ON(!__builtin_constant_p(_factor));           \
+               BUILD_BUG_ON(!__builtin_constant_p(_weight));           \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+               return e->internal >> ilog2(_factor);                   \
+       }                                                               \
+       static inline void ewma_##name##_add(struct ewma_##name *e,     \
+                                            unsigned long val)         \
+       {                                                               \
+               unsigned long internal = ACCESS_ONCE(e->internal);      \
+               unsigned long weight = ilog2(_weight);                  \
+               unsigned long factor = ilog2(_factor);                  \
+                                                                       \
+               BUILD_BUG_ON(!__builtin_constant_p(_factor));           \
+               BUILD_BUG_ON(!__builtin_constant_p(_weight));           \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_factor);                   \
+               BUILD_BUG_ON_NOT_POWER_OF_2(_weight);                   \
+                                                                       \
+               ACCESS_ONCE(e->internal) = internal ?                   \
+                       (((internal << weight) - internal) +            \
+                               (val << factor)) >> weight :            \
+                       (val << factor);                                \
+       }
 
 #endif /* _LINUX_AVERAGE_H */
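
For context, a minimal usage sketch of the new per-name EWMA API (the "rssi" name and the 1024/8 parameters are illustrative, not taken from this commit). The BUILD_BUG_ON checks above reject non-constant or non-power-of-2 parameters at compile time:

#include <linux/average.h>

DECLARE_EWMA(rssi, 1024, 8)     /* factor and weight must be powers of 2 */

static struct ewma_rssi avg_rssi;

static void rssi_reset(void)
{
        ewma_rssi_init(&avg_rssi);              /* internal state starts at 0 */
}

static void rssi_sample(unsigned long val)
{
        ewma_rssi_add(&avg_rssi, val);          /* fold a new sample into the average */
}

static unsigned long rssi_read(void)
{
        return ewma_rssi_read(&avg_rssi);       /* descaled by ilog2(factor) */
}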
index 6cceedf65ca27d787f995980cf716de2d0a2be47..cf038431a5cc8c22246110c716e1d96e09f1a1ed 100644 (file)
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
        spinlock_t gpio_lock;
 #ifdef CONFIG_BCMA_DRIVER_GPIO
        struct gpio_chip gpio;
-       struct irq_domain *irq_domain;
 #endif
 };
 
index 139d6d2e123fb0c69bbe31705b08b637c020ac8d..f57d7fed9ec3f4554609a02538f44c5f950612b6 100644 (file)
@@ -10,6 +10,7 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
+#include <linux/perf_event.h>
 
 struct bpf_map;
 
@@ -24,6 +25,10 @@ struct bpf_map_ops {
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
        int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
        int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+       /* funcs called by prog_array and perf_event_array map */
+       void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+       void (*map_fd_put_ptr) (void *ptr);
 };
 
 struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
        bool owner_jited;
        union {
                char value[0] __aligned(8);
-               struct bpf_prog *prog[0] __aligned(8);
+               void *ptrs[0] __aligned(8);
        };
 };
 #define MAX_TAIL_CALL_CNT 32
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 extern const struct bpf_func_proto bpf_tail_call_proto;
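
The new map_fd_get_ptr/map_fd_put_ptr hooks let fd-based array maps (prog arrays, and now the perf event array) translate a user-supplied fd into a referenced kernel object on update and release it on delete. A rough sketch of an ops table wiring them up; the callback names are illustrative, not necessarily those of this commit:

static const struct bpf_map_ops perf_event_array_ops = {
        .map_alloc       = fd_array_map_alloc,
        .map_free        = fd_array_map_free,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_update_elem = fd_array_map_update_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        /* resolve an fd to a refcounted perf_event, and drop it again */
        .map_fd_get_ptr  = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr  = perf_event_fd_array_put_ptr,
};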
index 9012f877520802662fb5f3704c60ef24d09c7136..eb049c622208e3a0815177c528ba9149e6bc8846 100644 (file)
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
 
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
-               ((a[2] ^ b[2]) & m)) == 0;
+               (__force int)((a[2] ^ b[2]) & m)) == 0;
 #else
        return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
 #endif
index cc008c338f5a9bcb66076da96e929d6104697373..84b783f277f761a0ef7b940bd1fbab37db60567e 100644 (file)
@@ -55,7 +55,8 @@ struct vm_fault;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
 
 extern struct files_stat_struct files_stat;
 extern unsigned long get_max_files(void);
@@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
 
 /* fs/dcache.c */
 extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
 
 extern struct kmem_cache *names_cachep;
 
index b9c7897dc5668c3fe59c30ab8505d334a6689168..cfa906f28b7a277b480f5bf154bb8cf76e467ad5 100644 (file)
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
 #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
 #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED   BIT(7)
 
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED    BIT(5)
 #define WLAN_EXT_CAPA8_OPMODE_NOTIF    BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED    BIT(7)
 
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE  0x2
index cb9dcad72372150f2ad0d76c229d5f63b07f74ff..f1f32af6d9b96c8d21f55fe6e875673c15e0eab2 100644 (file)
@@ -31,6 +31,7 @@ struct ipv6_devconf {
        __s32           accept_ra_defrtr;
        __s32           accept_ra_min_hop_limit;
        __s32           accept_ra_pinfo;
+       __s32           ignore_routes_with_linkdown;
 #ifdef CONFIG_IPV6_ROUTER_PREF
        __s32           accept_ra_rtr_pref;
        __s32           rtr_probe_interval;
index b943cd9e2097326466919eccc83a25bd93b2ba58..250b1ff8b48d43c0f9f2479e388b405d8238eb41 100644 (file)
@@ -1182,6 +1182,16 @@ enum {
        MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR    = 0x40,
 };
 
+enum {
+       MLX5_IEEE_802_3_COUNTERS_GROUP        = 0x0,
+       MLX5_RFC_2863_COUNTERS_GROUP          = 0x1,
+       MLX5_RFC_2819_COUNTERS_GROUP          = 0x2,
+       MLX5_RFC_3635_COUNTERS_GROUP          = 0x3,
+       MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+       MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
+       MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
        if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
index 5fe0cae1a515567fb59b42e9b7ba49e9033826f5..8b6d6f2154a4eaab1cce3db487b92d8d1b36d4a9 100644 (file)
@@ -103,6 +103,8 @@ enum {
        MLX5_REG_PMTU            = 0x5003,
        MLX5_REG_PTYS            = 0x5004,
        MLX5_REG_PAOS            = 0x5006,
+       MLX5_REG_PFCC            = 0x5007,
+       MLX5_REG_PPCNT           = 0x5008,
        MLX5_REG_PMAOS           = 0x5012,
        MLX5_REG_PUDE            = 0x5009,
        MLX5_REG_PMPE            = 0x5010,
@@ -151,8 +153,8 @@ enum mlx5_dev_event {
 };
 
 enum mlx5_port_status {
-       MLX5_PORT_UP        = 1 << 1,
-       MLX5_PORT_DOWN      = 1 << 2,
+       MLX5_PORT_UP        = 1,
+       MLX5_PORT_DOWN      = 2,
 };
 
 struct mlx5_uuar_info {
@@ -760,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
                               u8 local_port);
 int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
                        int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-                        enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+                              enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+                                enum mlx5_port_status *status);
 
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
 void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -772,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
                              u8 *vl_hw_cap, u8 local_port);
 
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+                         u32 *rx_pause, u32 *tx_pause);
+
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
index c60a62bba652c112517abefdaf60433437780601..dd2097455a2e3b54cbb69c409bbe8fa7463264ec 100644 (file)
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
        struct mlx5_ifc_tisc_bits ctx;
 };
 
+struct mlx5_ifc_modify_tir_bitmask_bits {
+       u8         reserved[0x20];
+
+       u8         reserved1[0x1f];
+       u8         lro[0x1];
+};
+
 struct mlx5_ifc_modify_tir_out_bits {
        u8         status[0x8];
        u8         reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
 
        u8         reserved_3[0x20];
 
-       u8         modify_bitmask[0x40];
+       struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
 
        u8         reserved_4[0x40];
 
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
        u8         reserved_1[0x40];
 };
 
+struct mlx5_ifc_rqt_bitmask_bits {
+       u8         reserved[0x20];
+
+       u8         reserved1[0x1f];
+       u8         rqn_list[0x1];
+};
+
 struct mlx5_ifc_modify_rqt_in_bits {
        u8         opcode[0x10];
        u8         reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
 
        u8         reserved_3[0x20];
 
-       u8         modify_bitmask[0x40];
+       struct mlx5_ifc_rqt_bitmask_bits bitmask;
 
        u8         reserved_4[0x40];
 
index 04aa06852771e478029bc82367ab8e509ad2dd56..049d4b03c4c4d50bb6292300818a0cf3d126c0ca 100644 (file)
@@ -239,8 +239,16 @@ do {                                                               \
        net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)                         \
        net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)                          \
        net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...)                          \
+       do {                                                    \
+               if (0)                                          \
+                       no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+       } while (0)
+#endif
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
                           struct static_key *done_key);
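
With this change a !DEBUG build compiles the call away entirely, while the dead no_printk() branch still type-checks the format string and arguments. Call sites are unchanged, e.g.:

        /* rate-limited under DEBUG, compiled out otherwise; dev->name and
         * the format string are still checked at build time */
        net_dbg_ratelimited("%s: dropping malformed frame\n", dev->name);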
index 607b5f41f46f93e506adbefd7b3ed11ed8acfb67..6abe0d6f1e1d4b7db86285fd2ff370e1bb67d123 100644 (file)
@@ -1262,6 +1262,8 @@ struct net_device_ops {
  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
  *     change when it's running
  * @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1289,6 +1291,8 @@ enum netdev_priv_flags {
        IFF_XMIT_DST_RELEASE_PERM       = 1<<22,
        IFF_IPVLAN_MASTER               = 1<<23,
        IFF_IPVLAN_SLAVE                = 1<<24,
+       IFF_VRF_MASTER                  = 1<<25,
+       IFF_NO_QUEUE                    = 1<<26,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1316,6 +1320,8 @@ enum netdev_priv_flags {
 #define IFF_XMIT_DST_RELEASE_PERM      IFF_XMIT_DST_RELEASE_PERM
 #define IFF_IPVLAN_MASTER              IFF_IPVLAN_MASTER
 #define IFF_IPVLAN_SLAVE               IFF_IPVLAN_SLAVE
+#define IFF_VRF_MASTER                 IFF_VRF_MASTER
+#define IFF_NO_QUEUE                   IFF_NO_QUEUE
 
 /**
  *     struct net_device - The DEVICE structure.
@@ -1432,6 +1438,7 @@ enum netdev_priv_flags {
  *     @dn_ptr:        DECnet specific data
  *     @ip6_ptr:       IPv6 specific data
  *     @ax25_ptr:      AX.25 specific data
+ *     @vrf_ptr:       VRF specific data
  *     @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
  *
  *     @last_rx:       Time of last Rx
@@ -1650,6 +1657,7 @@ struct net_device {
        struct dn_dev __rcu     *dn_ptr;
        struct inet6_dev __rcu  *ip6_ptr;
        void                    *ax25_ptr;
+       struct net_vrf_dev __rcu *vrf_ptr;
        struct wireless_dev     *ieee80211_ptr;
        struct wpan_dev         *ieee802154_ptr;
 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -2303,8 +2311,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 
 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
 {
-       return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
-               skb_gro_offset(skb));
+       return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
 }
 
 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2400,37 +2407,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
        grc->delta = 0;
 }
 
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
-                                          int start, int offset,
-                                          struct gro_remcsum *grc,
-                                          bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+                                           unsigned int off, size_t hdrlen,
+                                           int start, int offset,
+                                           struct gro_remcsum *grc,
+                                           bool nopartial)
 {
        __wsum delta;
+       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 
        if (!nopartial) {
-               NAPI_GRO_CB(skb)->gro_remcsum_start =
-                   ((unsigned char *)ptr + start) - skb->head;
-               return;
+               NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+               return ptr;
+       }
+
+       ptr = skb_gro_header_fast(skb, off);
+       if (skb_gro_header_hard(skb, off + plen)) {
+               ptr = skb_gro_header_slow(skb, off + plen, off);
+               if (!ptr)
+                       return NULL;
        }
 
-       delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+       delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+                              start, offset);
 
        /* Adjust skb->csum since we changed the packet */
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
 
-       grc->offset = (ptr + offset) - (void *)skb->head;
+       grc->offset = off + hdrlen + offset;
        grc->delta = delta;
+
+       return ptr;
 }
 
 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
                                           struct gro_remcsum *grc)
 {
+       void *ptr;
+       size_t plen = grc->offset + sizeof(u16);
+
        if (!grc->delta)
                return;
 
-       remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+       ptr = skb_gro_header_fast(skb, grc->offset);
+       if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+               ptr = skb_gro_header_slow(skb, plen, grc->offset);
+               if (!ptr)
+                       return;
+       }
+
+       remcsum_unadjust((__sum16 *)ptr, grc->delta);
 }
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
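
skb_gro_remcsum_process() now takes the GRO offset and header length so it can re-fetch the header after the remcsum adjustment, returning the (possibly new) header pointer, or NULL when the header cannot be pulled. A hedged caller-side sketch, loosely modelled on a VXLAN-style GRO receive path; vh, off_vx, start, offs, grc and nopartial are all assumed locals:

        /* sketch only: revalidate the header pointer after remcsum processing */
        vh = skb_gro_remcsum_process(skb, (void *)vh, off_vx,
                                     sizeof(struct vxlanhdr),
                                     start, offs, &grc, nopartial);
        if (!vh)
                goto out;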
@@ -3808,6 +3836,32 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
        return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_is_vrf(const struct net_device *dev)
+{
+       return dev->priv_flags & IFF_VRF_MASTER;
+}
+
+static inline bool netif_index_is_vrf(struct net *net, int ifindex)
+{
+       bool rc = false;
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+       struct net_device *dev;
+
+       if (ifindex == 0)
+               return false;
+
+       rcu_read_lock();
+
+       dev = dev_get_by_index_rcu(net, ifindex);
+       if (dev)
+               rc = netif_is_vrf(dev);
+
+       rcu_read_unlock();
+#endif
+       return rc;
+}
+
 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 static inline void netif_keep_dst(struct net_device *dev)
 {
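
The VRF helpers give code outside the VRF driver a reference-safe way to test whether an ifindex names a VRF master. A hypothetical caller (sk and fl4 are assumed locals), using the FLOWI_FLAG_VRFSRC flag added further down in this series:

        /* sketch: steer an output route lookup when the bound device is a VRF */
        if (netif_index_is_vrf(sock_net(sk), sk->sk_bound_dev_if))
                fl4->flowi4_flags |= FLOWI_FLAG_VRFSRC;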
index 6ec975748742793fd51c274314a208ea5cb697db..80ca889b164e3eab5b42c7249d264f55650e44bf 100644 (file)
@@ -2,6 +2,7 @@
 #define _NFNL_ACCT_H_
 
 #include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
 
 enum {
        NFACCT_NO_QUOTA         = -1,
@@ -11,7 +12,7 @@ enum {
 
 struct nf_acct;
 
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 extern int nfnl_acct_overquota(const struct sk_buff *skb,
index f34e040b34e9ffbf0abe66b439571ec8902440ca..41c93844fb1d1ed5c0dbad77fe5a409557d66067 100644 (file)
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
         1 << PG_private | 1 << PG_private_2 | \
         1 << PG_writeback | 1 << PG_reserved | \
         1 << PG_slab    | 1 << PG_swapcache | 1 << PG_active | \
-        1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+        1 << PG_unevictable | __PG_MLOCKED | \
         __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set.  It they are set,
+ * Pages being prepped should not have these flags set.  If they are set,
  * there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
+ * alloc-free cycle to prevent the page from being reused.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP       ((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP       \
+       (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
 
 #define PAGE_FLAGS_PRIVATE                             \
        (1 << PG_private | 1 << PG_private_2)
index 2027809433b3c86b77b832d2e7a5311021e45db3..092a0e8a479aa19569fa0704e050a853bc38883c 100644 (file)
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
                                int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
 
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child)  { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
 static inline void perf_event_delayed_put(struct task_struct *task)    { }
+static inline struct perf_event *perf_event_get(unsigned int fd)       { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+       return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event)      { return -EINVAL; }
 static inline void perf_event_print_debug(void)                                { }
 static inline int perf_event_task_disable(void)                                { return -EINVAL; }
 static inline int perf_event_task_enable(void)                         { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event)            { }
 static inline void perf_event_disable(struct perf_event *event)                { }
 static inline int __perf_event_disable(void *info)                     { return -1; }
 static inline void perf_event_task_tick(void)                          { }
+static inline int perf_event_release_kernel(struct perf_event *event)  { return 0; }
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
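
These additions are consumed by the BPF perf-event map: perf_event_get() resolves a user-supplied fd to a referenced event, and perf_event_read_local() reads the counter for the current context without an IPI. A hedged sketch of the consumer side; the function name is an invention:

static int example_attach_counter(unsigned int ufd, u64 *count)
{
        struct perf_event *event;

        event = perf_event_get(ufd);            /* resolves the fd, takes a reference */
        if (IS_ERR(event))
                return PTR_ERR(event);

        *count = perf_event_read_local(event);  /* self-monitoring read, no IPI */
        return 0;
}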
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644 (file)
index d9d400a..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Driver include for ST NCI NFC chip family.
- *
- * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST_NCI_H_
-#define _ST_NCI_H_
-
-#define ST_NCI_DRIVER_NAME "st_nci"
-
-struct st_nci_nfc_platform_data {
-       unsigned int gpio_reset;
-       unsigned int irq_polarity;
-};
-
-#endif /* _ST_NCI_H_ */
index 76ebde9c11d483fe3a177b049e2774550e7b2bcc..a59c6ee566c2cf908ca7c6ec5512768e90d4a3b0 100644 (file)
@@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset);
 
 bool device_dma_is_coherent(struct device *dev);
 
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
 #endif /* _LINUX_PROPERTY_H_ */
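
Both helpers let drivers read firmware-described properties (DT or ACPI) through one interface instead of open-coding of_get_phy_mode()/of_get_mac_address(). A hypothetical probe-time sketch; example_probe and the error policy are assumptions:

static int example_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        u8 mac[ETH_ALEN];
        int phy_mode;

        phy_mode = device_get_phy_mode(dev);    /* PHY_INTERFACE_MODE_* or -errno */
        if (phy_mode < 0)
                return phy_mode;

        if (!device_get_mac_address(dev, mac, ETH_ALEN))
                return -EINVAL;                 /* or fall back to a random MAC */

        return 0;
}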
index df9fdf5576896e841bec213ebc6d25aee2f6f0fb..065e10b81a7728be5ee2023cb03f4c4c9b7078d7 100644 (file)
@@ -2913,11 +2913,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
  *
  * PHY drivers may accept clones of transmitted packets for
  * timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
  *
 * @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
  *
  */
 void skb_complete_tx_timestamp(struct sk_buff *skb,
index dc03d77ad23bb2588869251a67c5a7143c874b21..a2f59ec98d24a1af1e8de056305b563b2ff94df7 100644 (file)
 #define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
 #define LOWPAN_NHC_UDP_CS_C    0x04 /* checksum elided */
 
+#define LOWPAN_PRIV_SIZE(llpriv_size)  \
+       (sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+       LOWPAN_LLTYPE_BTLE,
+       LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+       enum lowpan_lltypes lltype;
+
+       /* must be last */
+       u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+       return netdev_priv(dev);
+}
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
        return skb->len + uncomp_header - ret;
 }
 
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
 int
 lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
                         const u8 *saddr, const u8 saddr_type,
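
LOWPAN_PRIV_SIZE() and lowpan_priv() let each link layer append its own private area behind the generic one. A hypothetical sketch of a link-layer driver allocating and tagging its netdev; my_ll_priv and my_setup are invented names:

        /* sketch: an IEEE 802.15.4 6LoWPAN device carving out ll-private space */
        dev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct my_ll_priv)),
                           "lowpan%d", NET_NAME_ENUM, my_setup);
        if (!dev)
                return -ENOMEM;

        lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
        ll = (struct my_ll_priv *)lowpan_priv(dev)->priv;   /* ll-specific area */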
index 2a6b0919e23f71af5f4660fce0a349bfa09b2fd9..9e1a59e01fa2f6316ae950833974aa628761cf51 100644 (file)
@@ -512,9 +512,11 @@ struct hci_conn_params {
                HCI_AUTO_CONN_DIRECT,
                HCI_AUTO_CONN_ALWAYS,
                HCI_AUTO_CONN_LINK_LOSS,
+               HCI_AUTO_CONN_EXPLICIT,
        } auto_connect;
 
        struct hci_conn *conn;
+       bool explicit_connect;
 };
 
 extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@ enum {
        HCI_CONN_DROP,
        HCI_CONN_PARAM_REMOVAL_PEND,
        HCI_CONN_NEW_LINK_KEY,
+       HCI_CONN_SCANNING,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
        return NULL;
 }
 
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_conn  *c;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(c, &h->list, list) {
+               if (c->type == LE_LINK && c->state == BT_CONNECT &&
+                   !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+                       rcu_read_unlock();
+                       return c;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return NULL;
+}
+
 int hci_disconnect(struct hci_conn *conn, __u8 reason);
 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@ void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+                                    u8 dst_type, u8 sec_level,
+                                    u16 conn_timeout, u8 role);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 dst_type, u8 sec_level, u16 conn_timeout,
                                u8 role);
@@ -988,6 +1014,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr,
                                                  u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+                                                   bdaddr_t *addr,
+                                                   u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
index 883fe1e7c5a17e982651a68cfb17cbc0bbe5b367..f0889a2476439553f055b31b6c577b6eb03e4d55 100644 (file)
@@ -2369,8 +2369,7 @@ struct cfg80211_qos_map {
  *     method returns 0.)
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
- *     registered. Note that this callback may not sleep, and cannot run
- *     concurrently with itself.
+ *     registered. The callback is allowed to sleep.
  *
  * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
  *     Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
index 382f94b59f2f706eab23f2f6ebe9c2f007c5ed2e..76b1ffaea863600be09280cc4c2d168dc5bae41f 100644 (file)
@@ -63,6 +63,8 @@ struct cfg802154_ops {
                                         s8 max_frame_retries);
        int     (*set_lbt_mode)(struct wpan_phy *wpan_phy,
                                struct wpan_dev *wpan_dev, bool mode);
+       int     (*set_ackreq_default)(struct wpan_phy *wpan_phy,
+                                     struct wpan_dev *wpan_dev, bool ackreq);
 };
 
 static inline bool
@@ -173,6 +175,9 @@ struct wpan_dev {
        struct list_head list;
        struct net_device *netdev;
 
+       /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
+       struct net_device *lowpan_dev;
+
        u32 identifier;
 
        /* MAC PIB */
@@ -193,6 +198,9 @@ struct wpan_dev {
        bool lbt;
 
        bool promiscuous_mode;
+
+       /* fallback for acknowledgment bit setting */
+       bool ackreq;
 };
 
 #define to_phy(_dev)   container_of(_dev, struct wpan_phy, dev)
index 2d1d73cb773e9bd160f5c43bb58981ee8597b87c..9fcaedf994ee2ba5db20a0635c78ceed06f57cc1 100644 (file)
@@ -140,14 +140,16 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
 
 struct sk_buff;
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-                             __be32 from, __be32 to, int pseudohdr);
+                             __be32 from, __be32 to, bool pseudohdr);
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
-                              int pseudohdr);
+                              bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+                                    __wsum diff, bool pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                            __be16 from, __be16 to,
-                                           int pseudohdr)
+                                           bool pseudohdr)
 {
        inet_proto_csum_replace4(sum, skb, (__force __be32)from,
                                 (__force __be32)to, pseudohdr);
index fbca63ba8f733fd37fa300bbbe33a300ab05fd49..b34d812bc5d056d47b7d39181668572cde4f53ca 100644 (file)
@@ -171,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
        return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
 }
 
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+       return !!((ds->dsa_port_mask) & (1 << p));
+}
+
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
        return ds->phys_port_mask & (1 << p) && ds->ports[p];
@@ -296,12 +301,28 @@ struct dsa_switch_driver {
                                     u32 br_port_mask);
        int     (*port_stp_update)(struct dsa_switch *ds, int port,
                                   u8 state);
-       int     (*fdb_add)(struct dsa_switch *ds, int port,
-                          const unsigned char *addr, u16 vid);
-       int     (*fdb_del)(struct dsa_switch *ds, int port,
-                          const unsigned char *addr, u16 vid);
-       int     (*fdb_getnext)(struct dsa_switch *ds, int port,
-                              unsigned char *addr, bool *is_static);
+
+       /*
+        * VLAN support
+        */
+       int     (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
+       int     (*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
+       int     (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+                                bool untagged);
+       int     (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+       int     (*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
+                               unsigned long *ports, unsigned long *untagged);
+
+       /*
+        * Forwarding database
+        */
+       int     (*port_fdb_add)(struct dsa_switch *ds, int port,
+                               const unsigned char *addr, u16 vid);
+       int     (*port_fdb_del)(struct dsa_switch *ds, int port,
+                               const unsigned char *addr, u16 vid);
+       int     (*port_fdb_getnext)(struct dsa_switch *ds, int port,
+                                   unsigned char *addr, u16 *vid,
+                                   bool *is_static);
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
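
The fdb hooks are renamed to port_fdb_* and gain a VID argument, alongside the new VLAN hooks. A sketch of how a driver might now populate its ops table; the example_* handlers are assumed:

static struct dsa_switch_driver example_switch_driver = {
        .port_pvid_get    = example_port_pvid_get,
        .port_pvid_set    = example_port_pvid_set,
        .port_vlan_add    = example_port_vlan_add,
        .port_vlan_del    = example_port_vlan_del,
        .vlan_getnext     = example_vlan_getnext,
        .port_fdb_add     = example_port_fdb_add,       /* now takes a u16 vid */
        .port_fdb_del     = example_port_fdb_del,
        .port_fdb_getnext = example_port_fdb_getnext,   /* reports the vid too */
};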
index 2578811cef5167e94269bb9967f6b025c824084a..ef8f1d43a2033bc67ab3c8f3b08657191463654e 100644 (file)
@@ -84,12 +84,13 @@ struct dst_entry {
        __u32                   __pad2;
 #endif
 
+#ifdef CONFIG_64BIT
+       struct lwtunnel_state   *lwtstate;
        /*
         * Align __refcnt to a 64 bytes alignment
         * (L1_CACHE_SIZE would be too much)
         */
-#ifdef CONFIG_64BIT
-       long                    __pad_to_align_refcnt[2];
+       long                    __pad_to_align_refcnt[1];
 #endif
        /*
         * __refcnt wants to be on a different cache line from
@@ -98,6 +99,9 @@ struct dst_entry {
        atomic_t                __refcnt;       /* client references    */
        int                     __use;
        unsigned long           lastuse;
+#ifndef CONFIG_64BIT
+       struct lwtunnel_state   *lwtstate;
+#endif
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
index 075f523ff23f44f471261b8539a4f7e219b0e92f..2cb52d562272aa395be9186f676c4dbcaf3c1462 100644 (file)
@@ -23,22 +23,17 @@ static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
        return NULL;
 }
 
-static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
-                                                    int family)
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
 {
        struct metadata_dst *md_dst = skb_metadata_dst(skb);
-       struct rtable *rt;
+       struct dst_entry *dst;
 
        if (md_dst)
                return &md_dst->u.tun_info;
 
-       switch (family) {
-       case AF_INET:
-               rt = (struct rtable *)skb_dst(skb);
-               if (rt && rt->rt_lwtstate)
-                       return lwt_tun_info(rt->rt_lwtstate);
-               break;
-       }
+       dst = skb_dst(skb);
+       if (dst && dst->lwtstate)
+               return lwt_tun_info(dst->lwtstate);
 
        return NULL;
 }
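
Dropping the address-family argument works because lwtstate now lives in the generic dst_entry (see the dst.h hunk above), so IPv4 and IPv6 share one lookup. A caller sketch, using the reworked ip_tunnel_key layout that appears later in this series:

        /* sketch: one call now covers both metadata dst and lwtunnel state */
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
        __be32 daddr = 0;

        if (info)
                daddr = info->key.u.ipv4.dst;   /* new union layout, see ip_tunnels.h */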
index 3098ae33a1784f920e26dd445a041e9deac1888e..9e0297c4c11da8f1a1332f459c428c705c686bf0 100644 (file)
@@ -33,6 +33,7 @@ struct flowi_common {
        __u8    flowic_flags;
 #define FLOWI_FLAG_ANYSRC              0x01
 #define FLOWI_FLAG_KNOWN_NH            0x02
+#define FLOWI_FLAG_VRFSRC              0x04
        __u32   flowic_secid;
        struct flowi_tunnel flowic_tun_key;
 };
@@ -129,6 +130,7 @@ struct flowi6 {
 #define flowi6_proto           __fl_common.flowic_proto
 #define flowi6_flags           __fl_common.flowic_flags
 #define flowi6_secid           __fl_common.flowic_secid
+#define flowi6_tun_key         __fl_common.flowic_tun_key
        struct in6_addr         daddr;
        struct in6_addr         saddr;
        __be32                  flowlabel;
index b53182018743f4383e525c43f30fc25bd5b4f5a8..97eafdc47eea118f6122b4a51a790d93a3830f86 100644 (file)
@@ -4,6 +4,12 @@
 #include <linux/skbuff.h>
 #include <net/ip_tunnels.h>
 
+struct gre_base_hdr {
+       __be16 flags;
+       __be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
 #define GREPROTO_CISCO         0
 #define GREPROTO_PPTP          1
 #define GREPROTO_MAX           2
@@ -14,91 +20,9 @@ struct gre_protocol {
        void (*err_handler)(struct sk_buff *skb, u32 info);
 };
 
-struct gre_base_hdr {
-       __be16 flags;
-       __be16 protocol;
-};
-#define GRE_HEADER_SECTION 4
-
 int gre_add_protocol(const struct gre_protocol *proto, u8 version);
 int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 
-struct gre_cisco_protocol {
-       int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
-       int (*err_handler)(struct sk_buff *skb, u32 info,
-                          const struct tnl_ptk_info *tpi);
-       u8 priority;
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *proto);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
-                     int hdr_len);
-
-static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-                                                 bool csum)
-{
-       return iptunnel_handle_offloads(skb, csum,
-                                       csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-}
-
-
-static inline int ip_gre_calc_hlen(__be16 o_flags)
-{
-       int addend = 4;
-
-       if (o_flags&TUNNEL_CSUM)
-               addend += 4;
-       if (o_flags&TUNNEL_KEY)
-               addend += 4;
-       if (o_flags&TUNNEL_SEQ)
-               addend += 4;
-       return addend;
-}
-
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
-       __be16 tflags = 0;
-
-       if (flags & GRE_CSUM)
-               tflags |= TUNNEL_CSUM;
-       if (flags & GRE_ROUTING)
-               tflags |= TUNNEL_ROUTING;
-       if (flags & GRE_KEY)
-               tflags |= TUNNEL_KEY;
-       if (flags & GRE_SEQ)
-               tflags |= TUNNEL_SEQ;
-       if (flags & GRE_STRICT)
-               tflags |= TUNNEL_STRICT;
-       if (flags & GRE_REC)
-               tflags |= TUNNEL_REC;
-       if (flags & GRE_VERSION)
-               tflags |= TUNNEL_VERSION;
-
-       return tflags;
-}
-
-static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
-       __be16 flags = 0;
-
-       if (tflags & TUNNEL_CSUM)
-               flags |= GRE_CSUM;
-       if (tflags & TUNNEL_ROUTING)
-               flags |= GRE_ROUTING;
-       if (tflags & TUNNEL_KEY)
-               flags |= GRE_KEY;
-       if (tflags & TUNNEL_SEQ)
-               flags |= GRE_SEQ;
-       if (tflags & TUNNEL_STRICT)
-               flags |= GRE_STRICT;
-       if (tflags & TUNNEL_REC)
-               flags |= GRE_REC;
-       if (tflags & TUNNEL_VERSION)
-               flags |= GRE_VERSION;
-
-       return flags;
-}
-
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+                                      u8 name_assign_type);
 #endif
index 276328e3daa64a0d493a141e5690eb6dfe18c805..063d30474cf66077a7491ac8efbf4d400070c49e 100644 (file)
@@ -133,7 +133,6 @@ struct rt6_info {
        /* more non-fragment space at head required */
        unsigned short                  rt6i_nfheader_len;
        u8                              rt6i_protocol;
-       struct lwtunnel_state           *rt6i_lwtstate;
 };
 
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
index 47984415f5d1e7758d6a1c8b0feaeb55f902a7dd..224e4ecec91b71eb626e8f37e3785e6ded52c92c 100644 (file)
 #define IPTUNNEL_ERR_TIMEO     (30*HZ)
 
 /* Used to memset ip_tunnel padding. */
-#define IP_TUNNEL_KEY_SIZE                                     \
-       (offsetof(struct ip_tunnel_key, tp_dst) +               \
-        FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+#define IP_TUNNEL_KEY_SIZE     offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN                             \
+       (FIELD_SIZEOF(struct ip_tunnel_key, u) -                \
+        FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
 
 struct ip_tunnel_key {
        __be64                  tun_id;
-       __be32                  ipv4_src;
-       __be32                  ipv4_dst;
+       union {
+               struct {
+                       __be32  src;
+                       __be32  dst;
+               } ipv4;
+               struct {
+                       struct in6_addr src;
+                       struct in6_addr dst;
+               } ipv6;
+       } u;
        __be16                  tun_flags;
-       __u8                    ipv4_tos;
-       __u8                    ipv4_ttl;
+       u8                      tos;            /* TOS for IPv4, TC for IPv6 */
+       u8                      ttl;            /* TTL for IPv4, HL for IPv6 */
        __be16                  tp_src;
        __be16                  tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
+};
 
 /* Indicates whether the tunnel info structure represents receive
  * or transmit tunnel parameters.
@@ -64,8 +76,8 @@ struct ip_tunnel_6rd_parm {
 #endif
 
 struct ip_tunnel_encap {
-       __u16                   type;
-       __u16                   flags;
+       u16                     type;
+       u16                     flags;
        __be16                  sport;
        __be16                  dport;
 };
@@ -82,6 +94,8 @@ struct ip_tunnel_dst {
        __be32                           saddr;
 };
 
+struct metadata_dst;
+
 struct ip_tunnel {
        struct ip_tunnel __rcu  *next;
        struct hlist_node hash_node;
@@ -93,8 +107,8 @@ struct ip_tunnel {
                                         * arrived */
 
        /* These four fields used only by GRE */
-       __u32           i_seqno;        /* The last seen seqno  */
-       __u32           o_seqno;        /* The last output seqno */
+       u32             i_seqno;        /* The last seen seqno  */
+       u32             o_seqno;        /* The last output seqno */
        int             tun_hlen;       /* Precalculated header length */
        int             mlink;
 
@@ -115,6 +129,7 @@ struct ip_tunnel {
        unsigned int            prl_count;      /* # of entries in PRL */
        int                     ip_tnl_net_id;
        struct gro_cells        gro_cells;
+       bool                    collect_md;
 };
 
 #define TUNNEL_CSUM            __cpu_to_be16(0x01)
@@ -149,6 +164,7 @@ struct tnl_ptk_info {
 struct ip_tunnel_net {
        struct net_device *fb_tunnel_dev;
        struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+       struct ip_tunnel __rcu *collect_md_tun;
 };
 
 struct ip_tunnel_encap_ops {
@@ -175,10 +191,12 @@ static inline void __ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
                                         const void *opts, u8 opts_len)
 {
        tun_info->key.tun_id = tun_id;
-       tun_info->key.ipv4_src = saddr;
-       tun_info->key.ipv4_dst = daddr;
-       tun_info->key.ipv4_tos = tos;
-       tun_info->key.ipv4_ttl = ttl;
+       tun_info->key.u.ipv4.src = saddr;
+       tun_info->key.u.ipv4.dst = daddr;
+       memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_IPV4_PAD,
+              0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+       tun_info->key.tos = tos;
+       tun_info->key.ttl = ttl;
        tun_info->key.tun_flags = tun_flags;
 
        /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
@@ -235,7 +253,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   __be32 key);
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-                 const struct tnl_ptk_info *tpi, bool log_ecn_error);
+                 const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+                 bool log_ecn_error);
 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p);
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -268,8 +287,8 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
 
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
-                 __be32 src, __be32 dst, __u8 proto,
-                 __u8 tos, __u8 ttl, __be16 df, bool xnet);
+                 __be32 src, __be32 dst, u8 proto,
+                 u8 tos, u8 ttl, __be16 df, bool xnet);
 
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
                                         int gso_type_mask);
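
With the addresses in a union, IPv4 users must zero the IPv6-sized tail themselves, which is what the IP_TUNNEL_KEY_IPV4_PAD* macros are for. A sketch mirroring __ip_tunnel_info_init() above; key, saddr, daddr, tos and ttl are assumed locals:

        /* sketch: fill an IPv4 tunnel key and clear the unused IPv6 tail */
        key->u.ipv4.src = saddr;
        key->u.ipv4.dst = daddr;
        memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
               0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
        key->tos = tos;
        key->ttl = ttl;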
index 33bd30963a95a529353da82309361743d30bc8cf..fce0e35e74d075cd77f23a84efc6c36885c93ba9 100644 (file)
 #define LWTUNNEL_HASH_SIZE   (1 << LWTUNNEL_HASH_BITS)
 
 /* lw tunnel state flags */
-#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT  BIT(1)
 
 struct lwtunnel_state {
        __u16           type;
        __u16           flags;
        atomic_t        refcnt;
+       int             (*orig_output)(struct sock *sk, struct sk_buff *skb);
+       int             (*orig_input)(struct sk_buff *);
        int             len;
        __u8            data[0];
 };
 
 struct lwtunnel_encap_ops {
        int (*build_state)(struct net_device *dev, struct nlattr *encap,
+                          unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts);
        int (*output)(struct sock *sk, struct sk_buff *skb);
+       int (*input)(struct sk_buff *skb);
        int (*fill_encap)(struct sk_buff *skb,
                          struct lwtunnel_state *lwtstate);
        int (*get_encap_size)(struct lwtunnel_state *lwtstate);
@@ -32,6 +37,11 @@ struct lwtunnel_encap_ops {
 };
 
 #ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+       kfree(lws);
+}
+
 static inline struct lwtunnel_state *
 lwtstate_get(struct lwtunnel_state *lws)
 {
@@ -47,7 +57,7 @@ static inline void lwtstate_put(struct lwtunnel_state *lws)
                return;
 
        if (atomic_dec_and_test(&lws->refcnt))
-               kfree(lws);
+               lwtstate_free(lws);
 }
 
 static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
@@ -58,12 +68,20 @@ static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
        return false;
 }
 
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+       if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+               return true;
+
+       return false;
+}
 int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                         struct nlattr *encap,
+                        unsigned int family, const void *cfg,
                         struct lwtunnel_state **lws);
 int lwtunnel_fill_encap(struct sk_buff *skb,
                        struct lwtunnel_state *lwtstate);
@@ -71,10 +89,14 @@ int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
 struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
 int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
 int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
 
 #else
 
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
 static inline struct lwtunnel_state *
 lwtstate_get(struct lwtunnel_state *lws)
 {
@@ -90,6 +112,11 @@ static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
        return false;
 }
 
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+       return false;
+}
+
 static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
                                         unsigned int num)
 {
@@ -105,6 +132,7 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
 
 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                                       struct nlattr *encap,
+                                      unsigned int family, const void *cfg,
                                       struct lwtunnel_state **lws)
 {
        return -EOPNOTSUPP;
@@ -137,7 +165,7 @@ static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
        return -EOPNOTSUPP;
 }
 
-static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+static inline int lwtunnel_input(struct sk_buff *skb)
 {
        return -EOPNOTSUPP;
 }
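
An encap type that wants to see received packets registers an .input handler and sets LWTUNNEL_STATE_INPUT_REDIRECT on the states it builds; the stack then tests lwtunnel_input_redirect() and diverts the packet to lwtunnel_input(). An illustrative ops table; the example_* handlers are assumed:

static const struct lwtunnel_encap_ops example_encap_ops = {
        .build_state    = example_build_state,  /* sets LWTUNNEL_STATE_INPUT_REDIRECT */
        .output         = example_output,
        .input          = example_input,        /* new receive-side hook */
        .fill_encap     = example_fill_encap,
        .get_encap_size = example_get_encap_size,
};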
index 6b1077c2a63faaaafe60a7d080024662daeb8f54..e3314e516681ed0733ec212d7464c393222428ef 100644 (file)
@@ -973,6 +973,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
  *     If this flag is set, the stack cannot do any replay detection
  *     hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ *     flag indicates that the PN was verified for replay protection.
+ *     Note that this flag is currently only supported when the frame
+ *     is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set)
  * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
  *     the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1001,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
  *     number (@ampdu_reference) must be populated and be a distinct number for
  *     each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- *     monitoring purposes only
  * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
  *     subframes of a single A-MPDU
  * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1040,8 @@ enum mac80211_rx_flags {
        RX_FLAG_NO_SIGNAL_VAL           = BIT(12),
        RX_FLAG_HT_GF                   = BIT(13),
        RX_FLAG_AMPDU_DETAILS           = BIT(14),
-       RX_FLAG_AMPDU_REPORT_ZEROLEN    = BIT(15),
-       RX_FLAG_AMPDU_IS_ZEROLEN        = BIT(16),
+       RX_FLAG_PN_VALIDATED            = BIT(15),
+       /* bit 16 free */
        RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
        RX_FLAG_AMPDU_IS_LAST           = BIT(18),
        RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
@@ -1491,8 +1492,10 @@ enum ieee80211_key_flags {
  *     - Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
  */
 struct ieee80211_key_conf {
+       void *drv_priv;
        atomic64_t tx_pn;
        u32 cipher;
        u8 icv_len;
@@ -1675,7 +1678,6 @@ struct ieee80211_sta_rates {
  * @tdls: indicates whether the STA is a TDLS peer
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *     valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1693,7 +1695,6 @@ struct ieee80211_sta {
        struct ieee80211_sta_rates __rcu *rates;
        bool tdls;
        bool tdls_initiator;
-       bool mfp;
 
        struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1888,6 +1889,9 @@ struct ieee80211_txq {
  * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
  *     in one command, mac80211 doesn't have to run separate scans per band.
  *
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ *     than the BSS bandwidth for a TDLS link on the base channel.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1920,6 +1924,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_CHANCTX_STA_CSA,
        IEEE80211_HW_SUPPORTS_CLONED_SKBS,
        IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+       IEEE80211_HW_TDLS_WIDER_BW,
 
        /* keep last, obviously */
        NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3701,28 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
 void ieee80211_restart_hw(struct ieee80211_hw *hw);
 
 /**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- *     driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, the driver should put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
  *
- * See also netif_napi_add().
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
  */
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-                       struct net_device *napi_dev,
-                       int (*poll)(struct napi_struct *, int),
-                       int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      struct napi_struct *napi);
 
 /**
  * ieee80211_rx - receive frame
@@ -3731,7 +3744,10 @@ void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       ieee80211_rx_napi(hw, skb, NULL);
+}
 
 /**
  * ieee80211_rx_irqsafe - receive frame
@@ -4314,19 +4330,6 @@ void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
 void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
                            struct sk_buff *skb, u8 *p2k);
 
-/**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-                                       u8 *k1, u8 *k2);
-
 /**
  * ieee80211_get_key_tx_seq - get key TX sequence counter
  *
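
For the ieee80211_rx_napi() change above: drivers now own their NAPI context and pass it along with each frame instead of registering it through the removed ieee80211_napi_add(). A hedged sketch of a driver poll routine; example_hw and example_fetch_rx are invented:

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_hw *priv = container_of(napi, struct example_hw, napi);
        struct sk_buff *skb;
        int done = 0;

        while (done < budget && (skb = example_fetch_rx(priv)) != NULL) {
                ieee80211_rx_napi(priv->hw, skb, napi); /* BHs are disabled here */
                done++;
        }

        if (done < budget)
                napi_complete(napi);
        return done;
}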
index b3a7751251b4cb9ce1a7fcb1d3999a63f4ff5074..aba5695fadb00df5fb83ff2439474c1f7ecda65a 100644 (file)
@@ -182,7 +182,8 @@ int ndisc_rcv(struct sk_buff *skb);
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                   const struct in6_addr *solicit,
-                  const struct in6_addr *daddr, const struct in6_addr *saddr);
+                  const struct in6_addr *daddr, const struct in6_addr *saddr,
+                  struct sk_buff *oskb);
 
 void ndisc_send_rs(struct net_device *dev,
                   const struct in6_addr *saddr, const struct in6_addr *daddr);
index bd33e66f49aad086784b2dd66cb054ea18e73c7a..8b683841e5743f011a0d362af4ea9c26d0e75c77 100644 (file)
@@ -125,6 +125,7 @@ struct neigh_statistics {
        unsigned long forced_gc_runs;   /* number of forced GC runs */
 
        unsigned long unres_discards;   /* number of unresolved drops */
+       unsigned long table_fulls;      /* times even gc couldn't help */
 };
 
 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
index e951453e0a2378caf405f62910dd91d7c768ea3b..2dcea635ecce3ead337ffa5fd2ba68096db9f997 100644 (file)
@@ -118,6 +118,9 @@ struct net {
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+       struct list_head        nfnl_acct_list;
+#endif
 #endif
 #ifdef CONFIG_WEXT_CORE
        struct sk_buff_head     wext_nlevents;
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644 (file)
index 0000000..42008f1
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+                const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644 (file)
index 0000000..ed6bd66
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+                const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
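
Both helpers clone the skb and route the copy toward the given gateway/oif; this is the machinery behind packet-mirroring targets. A sketch loosely modelled on an xt_TEE-style target; the xt_tee_tginfo field layout is assumed:

static unsigned int tee_tg4(struct sk_buff *skb,
                            const struct xt_action_param *par)
{
        const struct xt_tee_tginfo *info = par->targinfo;
        int oif = info->priv ? info->priv->oif : 0;

        nf_dup_ipv4(skb, par->hooknum, &info->gw.in, oif);
        return XT_CONTINUE;
}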
index 37cd3911d5c59e97fe6328a2852ea17040f4dbc3..f5e23c6dee8bcbcc66705a4d5cefdaef311eb98b 100644 (file)
@@ -250,8 +250,12 @@ void nf_ct_untracked_status_or(unsigned long bits);
 void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
 void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+                                  const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp);
@@ -291,7 +295,9 @@ extern unsigned int nf_conntrack_max;
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+                                const struct nf_conntrack_zone *zone,
+                                gfp_t flags);
 
 #define NF_CT_STAT_INC(net, count)       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
index f2f0fa3bb15073edeb0af54087087539842e17f1..c03f9c42b3cd32be938e282e47cf66e63536a8c3 100644 (file)
@@ -52,7 +52,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+                     const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple);
 
 int __nf_conntrack_confirm(struct sk_buff *skb);
index 3f3aecbc8632e935e16e6517bc23eb8e07a9b16d..dce56f09ac9aed9c0f7d4a1d99acc9602e33ea5f 100644 (file)
@@ -4,7 +4,9 @@
 
 #ifndef _NF_CONNTRACK_EXPECT_H
 #define _NF_CONNTRACK_EXPECT_H
+
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@ int nf_conntrack_expect_init(void);
 void nf_conntrack_expect_fini(void);
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+                   const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+                     const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+                      const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple);
 
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
index 034efe8d45a544ac9557724b4e40359398dd1279..5316c7b3a374db1aa0b724e4a133a8f656bbb8da 100644 (file)
 #ifndef _NF_CONNTRACK_ZONES_H
 #define _NF_CONNTRACK_ZONES_H
 
-#define NF_CT_DEFAULT_ZONE     0
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-#include <net/netfilter/nf_conntrack_extend.h>
+#define NF_CT_DEFAULT_ZONE_ID  0
+
+#define NF_CT_ZONE_DIR_ORIG    (1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL    (1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK                1
 
 struct nf_conntrack_zone {
        u16     id;
+       u8      flags;
+       u8      dir;
 };
 
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_extend.h>
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *nf_ct_zone = NULL;
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-       struct nf_conntrack_zone *nf_ct_zone;
        nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
-       if (nf_ct_zone)
-               return nf_ct_zone->id;
 #endif
-       return NF_CT_DEFAULT_ZONE;
+       return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+       zone->id = id;
+       zone->flags = flags;
+       zone->dir = dir;
+
+       return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+               struct nf_conntrack_zone *tmp)
+{
+       const struct nf_conntrack_zone *zone;
+
+       if (!tmpl)
+               return &nf_ct_zone_dflt;
+
+       zone = nf_ct_zone(tmpl);
+       if (zone->flags & NF_CT_FLAG_MARK)
+               zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+       return zone;
+}
+
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+                                const struct nf_conntrack_zone *info)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       struct nf_conntrack_zone *nf_ct_zone;
+
+       nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+       if (!nf_ct_zone)
+               return -ENOMEM;
+
+       nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+                       info->flags);
+#endif
+       return 0;
 }
 
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+                                         enum ip_conntrack_dir dir)
+{
+       return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+                               enum ip_conntrack_dir dir)
+{
+       return nf_ct_zone_matches_dir(zone, dir) ?
+              zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+                                   const struct nf_conntrack_zone *b,
+                                   enum ip_conntrack_dir dir)
+{
+       return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+              nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+                                       const struct nf_conntrack_zone *b)
+{
+       return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
 #endif /* _NF_CONNTRACK_ZONES_H */
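
The zone API now carries a direction mask and an optional skb->mark override instead of a bare u16, so lookups compare per direction. A minimal sketch of the intended call pattern, built only from the helpers declared above (the wrapper name is hypothetical):

    static bool example_same_zone(const struct nf_conn *ct,
                                  const struct nf_conn *tmpl,
                                  const struct sk_buff *skb,
                                  enum ip_conntrack_dir dir)
    {
            struct nf_conntrack_zone tmp;
            const struct nf_conntrack_zone *zone;

            /* resolve the zone: the template's zone, possibly taken from
             * skb->mark when NF_CT_FLAG_MARK is set, else the default
             */
            zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);

            /* ids only match when the zone applies to this direction */
            return nf_ct_zone_equal(ct, zone, dir);
    }
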
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644 (file)
index 0000000..6b84cf6
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+       enum nft_registers      sreg_addr:8;
+       enum nft_registers      sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
index 01fc8c53111564891a186f817d710f4e6c68b80a..d0d0f1e53bb95efccfa7e1f311957087ae7724c4 100644 (file)
@@ -79,6 +79,7 @@ struct nci_ops {
        int   (*close)(struct nci_dev *ndev);
        int   (*send)(struct nci_dev *ndev, struct sk_buff *skb);
        int   (*setup)(struct nci_dev *ndev);
+       int   (*post_setup)(struct nci_dev *ndev);
        int   (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
        __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
        int   (*discover_se)(struct nci_dev *ndev);
@@ -277,6 +278,8 @@ int nci_request(struct nci_dev *ndev,
                            unsigned long opt),
                unsigned long opt, __u32 timeout);
 int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+int nci_core_reset(struct nci_dev *ndev);
+int nci_core_init(struct nci_dev *ndev);
 
 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
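
Exporting nci_core_reset() and nci_core_init() lets a driver's new post_setup hook re-run the core bring-up after applying proprietary settings. A hedged sketch, with the driver-specific configuration step omitted:

    static int example_post_setup(struct nci_dev *ndev)
    {
            int rc;

            /* apply vendor configuration here, then restart the core */
            rc = nci_core_reset(ndev);
            if (rc)
                    return rc;

            return nci_core_init(ndev);
    }
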
index f9e58ae45f9c2188a8c7f7070716f40fa4b548c2..30afc9a6718c01d7ef22ab6758a8a7c4a8019acf 100644 (file)
@@ -203,6 +203,7 @@ struct nfc_dev {
        int n_vendor_cmds;
 
        struct nfc_ops *ops;
+       struct genl_info *cur_cmd_info;
 };
 #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
 
@@ -318,4 +319,44 @@ static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
        return 0;
 }
 
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+                                                enum nfc_attrs attr,
+                                                u32 oui, u32 subcmd,
+                                                int approxlen);
+int nfc_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @dev: nfc device
+ * @oui: vendor oui
+ * @approxlen: an upper bound of the length of the data that will
+ *      be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call nfc_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
+                               u32 oui, u32 subcmd, int approxlen)
+{
+       return __nfc_alloc_vendor_cmd_reply_skb(dev,
+                                               NFC_ATTR_VENDOR_DATA,
+                                               oui,
+                                               subcmd, approxlen);
+}
+
 #endif /* __NET_NFC_H */
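
Taken together with the kernel-doc above, a vendor command's doit() is expected to look roughly like this sketch (OUI, subcommand and the payload value are placeholders):

    static int example_vendor_doit(struct nfc_dev *dev, void *data,
                                   size_t data_len)
    {
            struct sk_buff *msg;
            u32 result = 0;         /* hypothetical reply payload */

            msg = nfc_vendor_cmd_alloc_reply_skb(dev, 0x123456, 0x1,
                                                 sizeof(result));
            if (!msg)
                    return -ENOMEM;

            /* anything appended here lands inside NFC_ATTR_VENDOR_DATA */
            memcpy(skb_put(msg, sizeof(result)), &result, sizeof(result));

            return nfc_vendor_cmd_reply(msg);
    }
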
index b0ab530d28cde08478beb19881965c9a24e030cc..cf2713d8b975f11c6f7ba6725af165a53b5e82bb 100644 (file)
@@ -52,6 +52,8 @@ enum nl802154_commands {
 
        NL802154_CMD_SET_LBT_MODE,
 
+       NL802154_CMD_SET_ACKREQ_DEFAULT,
+
        /* add new commands above here */
 
        /* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@ enum nl802154_attrs {
 
        NL802154_ATTR_SUPPORTED_COMMANDS,
 
+       NL802154_ATTR_ACKREQ_DEFAULT,
+
        /* add attributes here, update the policy in nl802154.c */
 
        __NL802154_ATTR_AFTER_LAST,
index 2d45f419477fedadeef42818e7159c148dfd6b84..395d79bb556cf11b62a929cfb03172e30b84d4b1 100644 (file)
@@ -66,7 +66,6 @@ struct rtable {
 
        struct list_head        rt_uncached;
        struct uncached_list    *rt_uncached_list;
-       struct lwtunnel_state   *rt_lwtstate;
 };
 
 static inline bool rt_is_input_route(const struct rtable *rt)
@@ -189,8 +188,12 @@ void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
 void ip_rt_send_redirect(struct sk_buff *skb);
 
 unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, int tb_id);
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
                                __be32 addr);
+unsigned int inet_addr_type_dev_table(struct net *net,
+                                     const struct net_device *dev,
+                                     __be32 addr);
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
@@ -251,6 +254,9 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
        if (inet_sk(sk)->transparent)
                flow_flags |= FLOWI_FLAG_ANYSRC;
 
+       if (netif_index_is_vrf(sock_net(sk), oif))
+               flow_flags |= FLOWI_FLAG_VRFSRC;
+
        flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
                           protocol, flow_flags, dst, src, dport, sport);
 }
index 89da8934519bb720c4d5f1f479040a889e6079fd..319baab3b48efb9035a7987c6dbf96b9f8afe5b0 100644 (file)
@@ -72,6 +72,7 @@ struct switchdev_obj {
                struct switchdev_obj_fdb {              /* PORT_FDB */
                        const unsigned char *addr;
                        u16 vid;
+                       u16 ndm_state;
                } fdb;
        } u;
 };
diff --git a/include/net/vrf.h b/include/net/vrf.h
new file mode 100644 (file)
index 0000000..5bfb162
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * include/net/vrf.h - adds vrf dev structure definitions
+ * Copyright (c) 2015 Cumulus Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_NET_VRF_H
+#define __LINUX_NET_VRF_H
+
+struct net_vrf_dev {
+       struct rcu_head         rcu;
+       int                     ifindex; /* ifindex of master dev */
+       u32                     tb_id;   /* table id for VRF */
+};
+
+struct slave {
+       struct list_head        list;
+       struct net_device       *dev;
+};
+
+struct slave_queue {
+       struct list_head        all_slaves;
+};
+
+struct net_vrf {
+       struct slave_queue      queue;
+       struct rtable           *rth;
+       u32                     tb_id;
+};
+
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+/* called with rcu_read_lock() */
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+       struct net_vrf_dev *vrf_ptr;
+       int ifindex = 0;
+
+       if (!dev)
+               return 0;
+
+       if (netif_is_vrf(dev)) {
+               ifindex = dev->ifindex;
+       } else {
+               vrf_ptr = rcu_dereference(dev->vrf_ptr);
+               if (vrf_ptr)
+                       ifindex = vrf_ptr->ifindex;
+       }
+
+       return ifindex;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+       int ifindex;
+
+       rcu_read_lock();
+       ifindex = vrf_master_ifindex_rcu(dev);
+       rcu_read_unlock();
+
+       return ifindex;
+}
+
+/* called with rcu_read_lock */
+static inline int vrf_dev_table_rcu(const struct net_device *dev)
+{
+       int tb_id = 0;
+
+       if (dev) {
+               struct net_vrf_dev *vrf_ptr;
+
+               vrf_ptr = rcu_dereference(dev->vrf_ptr);
+               if (vrf_ptr)
+                       tb_id = vrf_ptr->tb_id;
+       }
+       return tb_id;
+}
+
+static inline int vrf_dev_table(const struct net_device *dev)
+{
+       int tb_id;
+
+       rcu_read_lock();
+       tb_id = vrf_dev_table_rcu(dev);
+       rcu_read_unlock();
+
+       return tb_id;
+}
+
+static inline int vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+       struct net_device *dev;
+       int tb_id = 0;
+
+       if (!ifindex)
+               return 0;
+
+       rcu_read_lock();
+
+       dev = dev_get_by_index_rcu(net, ifindex);
+       if (dev)
+               tb_id = vrf_dev_table_rcu(dev);
+
+       rcu_read_unlock();
+
+       return tb_id;
+}
+
+/* called with rtnl */
+static inline int vrf_dev_table_rtnl(const struct net_device *dev)
+{
+       int tb_id = 0;
+
+       if (dev) {
+               struct net_vrf_dev *vrf_ptr;
+
+               vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+               if (vrf_ptr)
+                       tb_id = vrf_ptr->tb_id;
+       }
+       return tb_id;
+}
+
+/* caller has already checked netif_is_vrf(dev) */
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+       struct rtable *rth = ERR_PTR(-ENETUNREACH);
+       struct net_vrf *vrf = netdev_priv(dev);
+
+       if (vrf) {
+               rth = vrf->rth;
+               atomic_inc(&rth->dst.__refcnt);
+       }
+       return rth;
+}
+
+#else
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+       return 0;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+       return 0;
+}
+
+static inline int vrf_dev_table_rcu(const struct net_device *dev)
+{
+       return 0;
+}
+
+static inline int vrf_dev_table(const struct net_device *dev)
+{
+       return 0;
+}
+
+static inline int vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+       return 0;
+}
+
+static inline int vrf_dev_table_rtnl(const struct net_device *dev)
+{
+       return 0;
+}
+
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+       return ERR_PTR(-ENETUNREACH);
+}
+#endif
+
+#endif /* __LINUX_NET_VRF_H */
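
Most call sites only need the table id; a minimal sketch of the fallback pattern (RT_TABLE_MAIN comes from the rtnetlink uapi):

    static u32 example_fib_table(const struct net_device *dev)
    {
            int tb_id = vrf_dev_table(dev); /* 0 if not enslaved to a VRF */

            return tb_id ? tb_id : RT_TABLE_MAIN;
    }
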
index eb8d721cdb676af4a5842e810fdd447f0536691d..6b3234599a2c52463d91d985a283db7f5b1d417a 100644 (file)
@@ -161,6 +161,7 @@ struct vxlan_dev {
        struct timer_list age_timer;
        spinlock_t        hash_lock;
        unsigned int      addrcnt;
+       struct gro_cells  gro_cells;
 
        struct vxlan_config     cfg;
 
@@ -181,7 +182,6 @@ struct vxlan_dev {
 #define VXLAN_F_GBP                    0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL      0x1000
 #define VXLAN_F_COLLECT_METADATA       0x2000
-#define VXLAN_F_FLOW_BASED             0x4000
 
 /* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
@@ -190,8 +190,7 @@ struct vxlan_dev {
                                         VXLAN_F_UDP_ZERO_CSUM6_RX |    \
                                         VXLAN_F_REMCSUM_RX |           \
                                         VXLAN_F_REMCSUM_NOPARTIAL |    \
-                                        VXLAN_F_COLLECT_METADATA |     \
-                                        VXLAN_F_FLOW_BASED)
+                                        VXLAN_F_COLLECT_METADATA)
 
 struct net_device *vxlan_dev_create(struct net *net, const char *name,
                                    u8 name_assign_type, struct vxlan_config *conf);
@@ -243,3 +242,8 @@ static inline void vxlan_get_rx_port(struct net_device *netdev)
 }
 #endif
 #endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+       return vs->sock->sk->sk_family;
+}
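
A one-line helper, but it saves the shared RX/TX paths from reaching through vs->sock->sk at every site; e.g. a hedged branch (parser names hypothetical):

    /* sketch: pick the outer header parser by socket family */
    if (vxlan_get_sk_family(vs) == AF_INET6)
            err = example_parse_v6(skb);
    else
            err = example_parse_v4(skb);
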
index f0ee97eec24d28625d9c3f714ab18a72b0b8f125..312e3fee9ccfc098f70cd8ec6edb87881e1d1f99 100644 (file)
@@ -285,10 +285,13 @@ struct xfrm_policy_afinfo {
        unsigned short          family;
        struct dst_ops          *dst_ops;
        void                    (*garbage_collect)(struct net *net);
-       struct dst_entry        *(*dst_lookup)(struct net *net, int tos,
+       struct dst_entry        *(*dst_lookup)(struct net *net,
+                                              int tos, int oif,
                                               const xfrm_address_t *saddr,
                                               const xfrm_address_t *daddr);
-       int                     (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+       int                     (*get_saddr)(struct net *net, int oif,
+                                            xfrm_address_t *saddr,
+                                            xfrm_address_t *daddr);
        void                    (*decode_session)(struct sk_buff *skb,
                                                  struct flowi *fl,
                                                  int reverse);
index 4942710ef720ea5716e8cc6ebf0df941e22500ba..8d1d7fa67ec48bad6872be07258066f9410eec6e 100644 (file)
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
                                   u64 * info_out);
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
 
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
index 865a141b118b15874e27b0e56473bac8854c4823..427bc41df3aef3a3f931bd94830f027bafea2661 100644 (file)
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
        int io_ops_count;
 };
 
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
 /* gets a pointer to data from the firmware block header */
 static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
 {
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
        const struct snd_soc_tplg_widget_events *events, int num_events,
        u16 event_type);
 
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+                                               u32 index)
+{
+       return 0;
+}
+
+#endif
+
 #endif
index 2ce13c109b006a5504946d785adaabe563a57a5e..92a48e2d54619f80113c13f9fff45f98ddfabe30 100644 (file)
@@ -114,6 +114,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_HASH,
        BPF_MAP_TYPE_ARRAY,
        BPF_MAP_TYPE_PROG_ARRAY,
+       BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -270,6 +271,7 @@ enum bpf_func_id {
         */
        BPF_FUNC_skb_get_tunnel_key,
        BPF_FUNC_skb_set_tunnel_key,
+       BPF_FUNC_perf_event_read,       /* u64 bpf_perf_event_read(&map, index) */
        __BPF_FUNC_MAX_ID,
 };
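
Together, the new map type and helper let a program read PMU counters in kernel context. A hedged sketch in the restricted C used by samples/bpf — struct bpf_map_def, SEC() and the helper wrappers are assumed to come from the samples' bpf_helpers.h:

    struct bpf_map_def SEC("maps") counters = {
            .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
            .key_size = sizeof(int),
            .value_size = sizeof(u32),
            .max_entries = 64,      /* one slot per possible CPU */
    };

    SEC("kprobe/sys_write")
    int example_prog(struct pt_regs *ctx)
    {
            u32 key = bpf_get_smp_processor_id();
            u64 count = bpf_perf_event_read(&counters, key);

            /* count is the raw counter value, or a negative errno cast to u64 */
            return 0;
    }
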
 
index ea047480a1f0ddca2025fd12db262b7bb359f9c6..313c305fd1ad47c862fa0c4da2c44c7a496c47af 100644 (file)
@@ -230,6 +230,7 @@ enum {
        IFLA_BR_AGEING_TIME,
        IFLA_BR_STP_STATE,
        IFLA_BR_PRIORITY,
+       IFLA_BR_VLAN_FILTERING,
        __IFLA_BR_MAX,
 };
 
@@ -340,6 +341,15 @@ enum macvlan_macaddr_mode {
 
 #define MACVLAN_FLAG_NOPROMISC 1
 
+/* VRF section */
+enum {
+       IFLA_VRF_UNSPEC,
+       IFLA_VRF_TABLE,
+       __IFLA_VRF_MAX
+};
+
+#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+
 /* IPVLAN section */
 enum {
        IFLA_IPVLAN_UNSPEC,
@@ -382,7 +392,6 @@ enum {
        IFLA_VXLAN_REMCSUM_RX,
        IFLA_VXLAN_GBP,
        IFLA_VXLAN_REMCSUM_NOPARTIAL,
-       IFLA_VXLAN_FLOWBASED,
        IFLA_VXLAN_COLLECT_METADATA,
        __IFLA_VXLAN_MAX
 };
index d3d715f8c88f6d57c4318dc5b001e8efad2d074f..9e7edfd8141e5dea69129807f46b89b2b637ead9 100644 (file)
@@ -55,6 +55,7 @@ struct sockaddr_ll {
 #define PACKET_TX_HAS_OFF              19
 #define PACKET_QDISC_BYPASS            20
 #define PACKET_ROLLOVER_STATS          21
+#define PACKET_FANOUT_DATA             22
 
 #define PACKET_FANOUT_HASH             0
 #define PACKET_FANOUT_LB               1
@@ -62,6 +63,8 @@ struct sockaddr_ll {
 #define PACKET_FANOUT_ROLLOVER         3
 #define PACKET_FANOUT_RND              4
 #define PACKET_FANOUT_QM               5
+#define PACKET_FANOUT_CBPF             6
+#define PACKET_FANOUT_EBPF             7
 #define PACKET_FANOUT_FLAG_ROLLOVER    0x1000
 #define PACKET_FANOUT_FLAG_DEFRAG      0x8000
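
With the two new modes, userspace steers fanout decisions with a BPF program handed over via PACKET_FANOUT_DATA. A hedged sketch of the expected setsockopt sequence for the eBPF variant (error handling trimmed; the u32-fd payload is inferred from this series):

    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static int example_fanout_ebpf(int pf_sock, int group_id, int bpf_prog_fd)
    {
            int fanout = group_id | (PACKET_FANOUT_EBPF << 16);

            if (setsockopt(pf_sock, SOL_PACKET, PACKET_FANOUT,
                           &fanout, sizeof(fanout)))
                    return -1;

            /* hand the already-loaded eBPF program fd to the group */
            return setsockopt(pf_sock, SOL_PACKET, PACKET_FANOUT_DATA,
                              &bpf_prog_fd, sizeof(bpf_prog_fd));
    }
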
 
index bd3cc11a431f9e5bb01f18d84d3f388915043367..af4de90ba27d18c76944b062d1db4e4267dcd844 100644 (file)
@@ -112,6 +112,7 @@ enum {
        IFLA_GRE_ENCAP_FLAGS,
        IFLA_GRE_ENCAP_SPORT,
        IFLA_GRE_ENCAP_DPORT,
+       IFLA_GRE_COLLECT_METADATA,
        __IFLA_GRE_MAX,
 };
 
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644 (file)
index 0000000..7ed9e67
--- /dev/null
@@ -0,0 +1,15 @@
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+       ILA_ATTR_UNSPEC,
+       ILA_ATTR_LOCATOR,                       /* u64 */
+
+       __ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX           (__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
index 80f3b74446a1a3e56704550138e816014537e60f..38b4fef20219242fad287c0379e5ff58b7dedcea 100644 (file)
@@ -173,6 +173,7 @@ enum {
        DEVCONF_STABLE_SECRET,
        DEVCONF_USE_OIF_ADDRS_ONLY,
        DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+       DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
        DEVCONF_MAX
 };
 
index 31377bbea3f8fc51caa41a8e7f34e7bf283da9ff..34141a5dfe745877d1e3905373a2255e93b888ab 100644 (file)
@@ -7,10 +7,41 @@ enum lwtunnel_encap_types {
        LWTUNNEL_ENCAP_NONE,
        LWTUNNEL_ENCAP_MPLS,
        LWTUNNEL_ENCAP_IP,
+       LWTUNNEL_ENCAP_ILA,
+       LWTUNNEL_ENCAP_IP6,
        __LWTUNNEL_ENCAP_MAX,
 };
 
 #define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
 
+enum lwtunnel_ip_t {
+       LWTUNNEL_IP_UNSPEC,
+       LWTUNNEL_IP_ID,
+       LWTUNNEL_IP_DST,
+       LWTUNNEL_IP_SRC,
+       LWTUNNEL_IP_TTL,
+       LWTUNNEL_IP_TOS,
+       LWTUNNEL_IP_SPORT,
+       LWTUNNEL_IP_DPORT,
+       LWTUNNEL_IP_FLAGS,
+       __LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+       LWTUNNEL_IP6_UNSPEC,
+       LWTUNNEL_IP6_ID,
+       LWTUNNEL_IP6_DST,
+       LWTUNNEL_IP6_SRC,
+       LWTUNNEL_IP6_HOPLIMIT,
+       LWTUNNEL_IP6_TC,
+       LWTUNNEL_IP6_SPORT,
+       LWTUNNEL_IP6_DPORT,
+       LWTUNNEL_IP6_FLAGS,
+       __LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
 
 #endif /* _UAPI_LWTUNNEL_H_ */
index 2e35c61bbdd192eb08c8dbe787f179cd62b9aaad..788655bfa0f3001d549e90c8fe5dd811600e3a98 100644 (file)
@@ -106,6 +106,7 @@ struct ndt_stats {
        __u64           ndts_rcv_probes_ucast;
        __u64           ndts_periodic_gc_runs;
        __u64           ndts_forced_gc_runs;
+       __u64           ndts_table_fulls;
 };
 
 enum {
index a99e6a9971408014514c21cc338df70ea5421f34..d8c8a7c9d88a7068c00d2b1cebe251c3c3c0dde4 100644 (file)
@@ -756,16 +756,25 @@ enum nft_ct_attributes {
 };
 #define NFTA_CT_MAX            (__NFTA_CT_MAX - 1)
 
+enum nft_limit_type {
+       NFT_LIMIT_PKTS,
+       NFT_LIMIT_PKT_BYTES
+};
+
 /**
  * enum nft_limit_attributes - nf_tables limit expression netlink attributes
  *
  * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
  * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
  */
 enum nft_limit_attributes {
        NFTA_LIMIT_UNSPEC,
        NFTA_LIMIT_RATE,
        NFTA_LIMIT_UNIT,
+       NFTA_LIMIT_BURST,
+       NFTA_LIMIT_TYPE,
        __NFTA_LIMIT_MAX
 };
 #define NFTA_LIMIT_MAX         (__NFTA_LIMIT_MAX - 1)
@@ -935,6 +944,20 @@ enum nft_redir_attributes {
 };
 #define NFTA_REDIR_MAX         (__NFTA_REDIR_MAX - 1)
 
+/**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_registers)
+ */
+enum nft_dup_attributes {
+       NFTA_DUP_UNSPEC,
+       NFTA_DUP_SREG_ADDR,
+       NFTA_DUP_SREG_DEV,
+       __NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX           (__NFTA_DUP_MAX - 1)
+
 /**
  * enum nft_gen_attributes - nf_tables ruleset generation attributes
  *
index acad6c52a6521d0fe81d1078a95fc7bb032d796c..c1a4e1441a25416e960349414b6a71e2c4409189 100644 (file)
@@ -61,6 +61,7 @@ enum ctattr_tuple {
        CTA_TUPLE_UNSPEC,
        CTA_TUPLE_IP,
        CTA_TUPLE_PROTO,
+       CTA_TUPLE_ZONE,
        __CTA_TUPLE_MAX
 };
 #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
index 5a688c1ca4d78e1cab449b26f8f3d2afdd190161..9e520418b858d5f5a355ee8514f5f1d3ee4a827f 100644 (file)
@@ -6,7 +6,13 @@
 enum {
        XT_CT_NOTRACK           = 1 << 0,
        XT_CT_NOTRACK_ALIAS     = 1 << 1,
-       XT_CT_MASK              = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+       XT_CT_ZONE_DIR_ORIG     = 1 << 2,
+       XT_CT_ZONE_DIR_REPL     = 1 << 3,
+       XT_CT_ZONE_MARK         = 1 << 4,
+
+       XT_CT_MASK              = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+                                 XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+                                 XT_CT_ZONE_MARK,
 };
 
 struct xt_ct_target_info {
index efe3443572baa5a3650d638806fd60ef09a90f68..413417f3707bbfde6375dfc14098bc1e099b5b70 100644 (file)
 #define PCI_MSIX_PBA           8       /* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR      0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET   0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF    12      /* size of MSIX registers */
 
 /* MSI-X Table entry format */
index 47d24cb3fbc1f8017f715dff06ab5aa5f977f6b0..0d3d3cc43356e128bc618acbde912fe6b5524ff0 100644 (file)
@@ -286,21 +286,6 @@ enum rt_class_t {
 
 /* Routing message attributes */
 
-enum ip_tunnel_t {
-       IP_TUN_UNSPEC,
-       IP_TUN_ID,
-       IP_TUN_DST,
-       IP_TUN_SRC,
-       IP_TUN_TTL,
-       IP_TUN_TOS,
-       IP_TUN_SPORT,
-       IP_TUN_DPORT,
-       IP_TUN_FLAGS,
-       __IP_TUN_MAX,
-};
-
-#define IP_TUN_MAX (__IP_TUN_MAX - 1)
-
 enum rtattr_type_t {
        RTA_UNSPEC,
        RTA_DST,
index 785c5ca0994b5ab41e43fcfe6553197f6633dd06..247c50bd60f0d067ad8884dbe4574adf0bfcf596 100644 (file)
 #include <linux/types.h>
 #include <sound/asound.h>
 
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
 /*
  * Maximum number of channels topology kcontrol can represent.
  */
@@ -77,7 +83,7 @@
 #define SND_SOC_TPLG_NUM_TEXTS         16
 
 /* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION       0x2
+#define SND_SOC_TPLG_ABI_VERSION       0x3
 
 /* Max size of TLV data */
 #define SND_SOC_TPLG_TLV_SIZE          32
 #define SND_SOC_TPLG_TYPE_PCM          7
 #define SND_SOC_TPLG_TYPE_MANIFEST     8
 #define SND_SOC_TPLG_TYPE_CODEC_LINK   9
-#define SND_SOC_TPLG_TYPE_MAX  SND_SOC_TPLG_TYPE_CODEC_LINK
+#define SND_SOC_TPLG_TYPE_PDATA                10
+#define SND_SOC_TPLG_TYPE_MAX  SND_SOC_TPLG_TYPE_PDATA
 
 /* vendor block IDs - please add new vendor types to end */
 #define SND_SOC_TPLG_TYPE_VENDOR_FW    1000
@@ -137,11 +144,19 @@ struct snd_soc_tplg_private {
 /*
  * Kcontrol TLV data.
  */
+struct snd_soc_tplg_tlv_dbscale {
+       __le32 min;
+       __le32 step;
+       __le32 mute;
+} __attribute__((packed));
+
 struct snd_soc_tplg_ctl_tlv {
-       __le32 size;    /* in bytes aligned to 4 */
-       __le32 numid;   /* control element numeric identification */
-       __le32 count;   /* number of elem in data array */
-       __le32 data[SND_SOC_TPLG_TLV_SIZE];
+       __le32 size;    /* in bytes of this structure */
+       __le32 type;    /* SNDRV_CTL_TLVT_*, type of TLV */
+       union {
+               __le32 data[SND_SOC_TPLG_TLV_SIZE];
+               struct snd_soc_tplg_tlv_dbscale scale;
+       };
 } __attribute__((packed));
 
 /*
@@ -155,9 +170,11 @@ struct snd_soc_tplg_channel {
 } __attribute__((packed));
 
 /*
- * Kcontrol Operations IDs
+ * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
+ * Kcontrol ops need get/put/info.
+ * Bytes ext ops need get/put.
  */
-struct snd_soc_tplg_kcontrol_ops_id {
+struct snd_soc_tplg_io_ops {
        __le32 get;
        __le32 put;
        __le32 info;
@@ -171,8 +188,8 @@ struct snd_soc_tplg_ctl_hdr {
        __le32 type;
        char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
        __le32 access;
-       struct snd_soc_tplg_kcontrol_ops_id ops;
-       __le32 tlv_size;        /* non zero means control has TLV data */
+       struct snd_soc_tplg_io_ops ops;
+       struct snd_soc_tplg_ctl_tlv tlv;
 } __attribute__((packed));
 
 /*
@@ -238,6 +255,7 @@ struct snd_soc_tplg_manifest {
        __le32 graph_elems;     /* number of graph elements */
        __le32 dai_elems;       /* number of DAI elements */
        __le32 dai_link_elems;  /* number of DAI link elements */
+       struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
 /*
@@ -259,7 +277,6 @@ struct snd_soc_tplg_mixer_control {
        __le32 invert;
        __le32 num_channels;
        struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
-       struct snd_soc_tplg_ctl_tlv tlv;
        struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
@@ -303,6 +320,7 @@ struct snd_soc_tplg_bytes_control {
        __le32 mask;
        __le32 base;
        __le32 num_regs;
+       struct snd_soc_tplg_io_ops ext_ops;
        struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
@@ -347,6 +365,7 @@ struct snd_soc_tplg_dapm_widget {
        __le32 reg;             /* negative reg = no direct dapm */
        __le32 shift;           /* bits to shift */
        __le32 mask;            /* non-shifted mask */
+       __le32 subseq;          /* sort within widget type */
        __u32 invert;           /* invert the power bit */
        __u32 ignore_suspend;   /* kept enabled over suspend */
        __u16 event_flags;
index c5d5626289cee3a46f7e1b7d2441ca0496ba4471..56506553d4d80dff814b75f45db6db280fd0dea7 100644 (file)
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
        key_init();
        security_init();
        dbg_late_init();
-       vfs_caches_init(totalram_pages);
+       vfs_caches_init();
        signals_init();
        /* rootfs populating might need page-writeback */
        page_writeback_init();
index a24ba9fe5bb8892dfaa7452fe78f9ef68d1d97fc..161a1807e6efb0fe8e773c41dafc8b4a76b38f71 100644 (file)
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
-               info->qsize += sizeof(*leaf);
        }
        leaf->priority = msg->m_type;
        rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
                             "lazy leaf delete!\n");
                rb_erase(&leaf->rb_node, &info->msg_tree);
                if (info->node_cache) {
-                       info->qsize -= sizeof(*leaf);
                        kfree(leaf);
                } else {
                        info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
                if (list_empty(&leaf->msg_list)) {
                        rb_erase(&leaf->rb_node, &info->msg_tree);
                        if (info->node_cache) {
-                               info->qsize -= sizeof(*leaf);
                                kfree(leaf);
                        } else {
                                info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
        } else {
                kfree(new_leaf);
        }
index bc3d530cb23efacb2e5695ad85a9bd3898524fa2..b471e5a3863ddbca70f2bf4dee22f40df0345fbe 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
-                        * The ipc object lock check must be visible on all
-                        * cores before rechecking the complex count.  Otherwise
-                        * we can race with  another thread that does:
+                        * We need a memory barrier with acquire semantics,
+                        * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
-                       smp_rmb();
+                       ipc_smp_acquire__after_spin_is_unlocked();
 
                        /*
                         * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
                rcu_read_lock();
                un = list_entry_rcu(ulp->list_proc.next,
                                    struct sem_undo, list_proc);
-               if (&un->list_proc == &ulp->list_proc)
-                       semid = -1;
-                else
-                       semid = un->semid;
+               if (&un->list_proc == &ulp->list_proc) {
+                       /*
+                        * We must wait for freeary() before freeing this ulp,
+                        * in case we raced with the last sem_undo. There is a small
+                        * window where we exit while freeary() has not yet
+                        * finished unlocking sem_undo_list.
+                        */
+                       spin_unlock_wait(&ulp->lock);
+                       rcu_read_unlock();
+                       break;
+               }
+               spin_lock(&ulp->lock);
+               semid = un->semid;
+               spin_unlock(&ulp->lock);
 
+               /* exit_sem raced with IPC_RMID, nothing to do */
                if (semid == -1) {
                        rcu_read_unlock();
-                       break;
+                       continue;
                }
 
-               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
                /* exit_sem raced with IPC_RMID, nothing to do */
                if (IS_ERR(sma)) {
                        rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
                ipc_assert_locked_object(&sma->sem_perm);
                list_del(&un->list_id);
 
-               spin_lock(&ulp->lock);
+               /* we are the last process using this ulp, acquiring ulp->lock
+                * isn't required. Besides that, we are also protected against
+                * IPC_RMID as we hold sma->sem_perm lock now
+                */
                list_del_rcu(&un->list_proc);
-               spin_unlock(&ulp->lock);
 
                /* perform adjustments registered in un */
                for (i = 0; i < sma->sem_nsems; i++) {
index 06e5cf2fe019faee43aa9f8ca9f17cad4973b74d..4aef24d91b633e12275cea64a380df4543fc796b 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
                if  ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
-               file = shmem_file_setup(name, size, acctflag);
+               file = shmem_kernel_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
index cb31229a6fa4ddd39c1d69038b998bc0c0dee4db..29ace107f2365c05b97f8f8aba54defeb59a6a78 100644 (file)
@@ -150,15 +150,15 @@ static int __init register_array_map(void)
 }
 late_initcall(register_array_map);
 
-static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
 {
-       /* only bpf_prog file descriptors can be stored in prog_array map */
+       /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
        return array_map_alloc(attr);
 }
 
-static void prog_array_map_free(struct bpf_map *map)
+static void fd_array_map_free(struct bpf_map *map)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;
@@ -167,21 +167,21 @@ static void prog_array_map_free(struct bpf_map *map)
 
        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
-               BUG_ON(array->prog[i] != NULL);
+               BUG_ON(array->ptrs[i] != NULL);
        kvfree(array);
 }
 
-static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 {
        return NULL;
 }
 
 /* only called from syscall */
-static int prog_array_map_update_elem(struct bpf_map *map, void *key,
-                                     void *value, u64 map_flags)
+static int fd_array_map_update_elem(struct bpf_map *map, void *key,
+                                   void *value, u64 map_flags)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
-       struct bpf_prog *prog, *old_prog;
+       void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;
 
        if (map_flags != BPF_ANY)
@@ -191,57 +191,75 @@ static int prog_array_map_update_elem(struct bpf_map *map, void *key,
                return -E2BIG;
 
        ufd = *(u32 *)value;
-       prog = bpf_prog_get(ufd);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
-       if (!bpf_prog_array_compatible(array, prog)) {
-               bpf_prog_put(prog);
-               return -EINVAL;
-       }
+       new_ptr = map->ops->map_fd_get_ptr(map, ufd);
+       if (IS_ERR(new_ptr))
+               return PTR_ERR(new_ptr);
 
-       old_prog = xchg(array->prog + index, prog);
-       if (old_prog)
-               bpf_prog_put_rcu(old_prog);
+       old_ptr = xchg(array->ptrs + index, new_ptr);
+       if (old_ptr)
+               map->ops->map_fd_put_ptr(old_ptr);
 
        return 0;
 }
 
-static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
-       struct bpf_prog *old_prog;
+       void *old_ptr;
        u32 index = *(u32 *)key;
 
        if (index >= array->map.max_entries)
                return -E2BIG;
 
-       old_prog = xchg(array->prog + index, NULL);
-       if (old_prog) {
-               bpf_prog_put_rcu(old_prog);
+       old_ptr = xchg(array->ptrs + index, NULL);
+       if (old_ptr) {
+               map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
 }
 
+static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct bpf_prog *prog = bpf_prog_get(fd);
+       if (IS_ERR(prog))
+               return prog;
+
+       if (!bpf_prog_array_compatible(array, prog)) {
+               bpf_prog_put(prog);
+               return ERR_PTR(-EINVAL);
+       }
+       return prog;
+}
+
+static void prog_fd_array_put_ptr(void *ptr)
+{
+       struct bpf_prog *prog = ptr;
+
+       bpf_prog_put_rcu(prog);
+}
+
 /* decrement refcnt of all bpf_progs that are stored in this map */
-void bpf_prog_array_map_clear(struct bpf_map *map)
+void bpf_fd_array_map_clear(struct bpf_map *map)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;
 
        for (i = 0; i < array->map.max_entries; i++)
-               prog_array_map_delete_elem(map, &i);
+               fd_array_map_delete_elem(map, &i);
 }
 
 static const struct bpf_map_ops prog_array_ops = {
-       .map_alloc = prog_array_map_alloc,
-       .map_free = prog_array_map_free,
+       .map_alloc = fd_array_map_alloc,
+       .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
-       .map_lookup_elem = prog_array_map_lookup_elem,
-       .map_update_elem = prog_array_map_update_elem,
-       .map_delete_elem = prog_array_map_delete_elem,
+       .map_lookup_elem = fd_array_map_lookup_elem,
+       .map_update_elem = fd_array_map_update_elem,
+       .map_delete_elem = fd_array_map_delete_elem,
+       .map_fd_get_ptr = prog_fd_array_get_ptr,
+       .map_fd_put_ptr = prog_fd_array_put_ptr,
 };
 
 static struct bpf_map_type_list prog_array_type __read_mostly = {
@@ -255,3 +273,60 @@ static int __init register_prog_array_map(void)
        return 0;
 }
 late_initcall(register_prog_array_map);
+
+static void perf_event_array_map_free(struct bpf_map *map)
+{
+       bpf_fd_array_map_clear(map);
+       fd_array_map_free(map);
+}
+
+static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+       struct perf_event *event;
+       const struct perf_event_attr *attr;
+
+       event = perf_event_get(fd);
+       if (IS_ERR(event))
+               return event;
+
+       attr = perf_event_attrs(event);
+       if (IS_ERR(attr))
+               return (void *)attr;
+
+       if (attr->type != PERF_TYPE_RAW &&
+           attr->type != PERF_TYPE_HARDWARE) {
+               perf_event_release_kernel(event);
+               return ERR_PTR(-EINVAL);
+       }
+       return event;
+}
+
+static void perf_event_fd_array_put_ptr(void *ptr)
+{
+       struct perf_event *event = ptr;
+
+       perf_event_release_kernel(event);
+}
+
+static const struct bpf_map_ops perf_event_array_ops = {
+       .map_alloc = fd_array_map_alloc,
+       .map_free = perf_event_array_map_free,
+       .map_get_next_key = array_map_get_next_key,
+       .map_lookup_elem = fd_array_map_lookup_elem,
+       .map_update_elem = fd_array_map_update_elem,
+       .map_delete_elem = fd_array_map_delete_elem,
+       .map_fd_get_ptr = perf_event_fd_array_get_ptr,
+       .map_fd_put_ptr = perf_event_fd_array_put_ptr,
+};
+
+static struct bpf_map_type_list perf_event_array_type __read_mostly = {
+       .ops = &perf_event_array_ops,
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+};
+
+static int __init register_perf_event_array_map(void)
+{
+       bpf_register_map_type(&perf_event_array_type);
+       return 0;
+}
+late_initcall(register_perf_event_array_map);
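
The prog_* to fd_* rename is the point of this hunk: the array now stores opaque refcounted pointers, and each map flavor only supplies the fd translation pair. A sketch of what a further flavor would need (the two hook names are hypothetical):

    static const struct bpf_map_ops example_fd_array_ops = {
            .map_alloc        = fd_array_map_alloc,
            .map_free         = fd_array_map_free,
            .map_get_next_key = array_map_get_next_key,
            .map_lookup_elem  = fd_array_map_lookup_elem,
            .map_update_elem  = fd_array_map_update_elem,
            .map_delete_elem  = fd_array_map_delete_elem,
            /* only these two are flavor-specific */
            .map_fd_get_ptr   = example_fd_get_ptr, /* fd -> refcounted object */
            .map_fd_put_ptr   = example_fd_put_ptr, /* drop that reference */
    };
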
index fafa741614453c46ec9eb90d340221dcf9c3cc85..67c380cfa9ca5b6ed8e48b69b38ba3c611d51a65 100644 (file)
@@ -450,7 +450,7 @@ select_insn:
 
                tail_call_cnt++;
 
-               prog = READ_ONCE(array->prog[index]);
+               prog = READ_ONCE(array->ptrs[index]);
                if (unlikely(!prog))
                        goto out;
 
index a1b14d197a4fc2da6acf2d812372284e78bc5af8..dc9b464fefa954a50c83e632c4ebea2ec464839e 100644 (file)
@@ -72,7 +72,7 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
                /* prog_array stores refcnt-ed bpf_prog pointers
                 * release them all when user space closes prog_array_fd
                 */
-               bpf_prog_array_map_clear(map);
+               bpf_fd_array_map_clear(map);
 
        bpf_map_put(map);
        return 0;
index cd307df98cb33fca49c2e02704336f0bdd7c736d..ed12e385fb75997f559e345989b178bc4c709e22 100644 (file)
@@ -238,6 +238,14 @@ static const char * const reg_type_str[] = {
        [CONST_IMM]             = "imm",
 };
 
+static const struct {
+       int map_type;
+       int func_id;
+} func_limit[] = {
+       {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+};
+
 static void print_verifier_state(struct verifier_env *env)
 {
        enum bpf_reg_type t;
@@ -837,6 +845,28 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
        return err;
 }
 
+static int check_map_func_compatibility(struct bpf_map *map, int func_id)
+{
+       bool bool_map, bool_func;
+       int i;
+
+       if (!map)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+               bool_map = (map->map_type == func_limit[i].map_type);
+               bool_func = (func_id == func_limit[i].func_id);
+               /* it can continue only when the map & func pair match;
+                * don't allow any other map type to be passed into
+                * the special func.
+                */
+               if (bool_map != bool_func)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int check_call(struct verifier_env *env, int func_id)
 {
        struct verifier_state *state = &env->cur_state;
@@ -912,21 +942,9 @@ static int check_call(struct verifier_env *env, int func_id)
                return -EINVAL;
        }
 
-       if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
-           func_id != BPF_FUNC_tail_call)
-               /* prog_array map type needs extra care:
-                * only allow to pass it into bpf_tail_call() for now.
-                * bpf_map_delete_elem() can be allowed in the future,
-                * while bpf_map_update_elem() must only be done via syscall
-                */
-               return -EINVAL;
-
-       if (func_id == BPF_FUNC_tail_call &&
-           map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
-               /* don't allow any other map type to be passed into
-                * bpf_tail_call()
-                */
-               return -EINVAL;
+       err = check_map_func_compatibility(map, func_id);
+       if (err)
+               return err;
 
        return 0;
 }
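
The table makes the pairing rule explicit: a map of a listed type may only reach its listed helper, and that helper only accepts that map type (the `bool_map != bool_func` test rejects any half-match). Extending it is one line per pair; a hypothetical future entry would slot in like this:

    static const struct {
            int map_type;
            int func_id;
    } func_limit[] = {
            {BPF_MAP_TYPE_PROG_ARRAY,       BPF_FUNC_tail_call},
            {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
            /* {BPF_MAP_TYPE_EXAMPLE,       BPF_FUNC_example},  hypothetical */
    };
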
index ee14e3a35a2994399edf176e7775e778c395e592..f0acff0f66c91380412dcbc1c899c94b1d3236b0 100644 (file)
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->mems_allowed as a temp variable */
-       update_nodemasks_hier(cs, &cs->mems_allowed);
+       update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
        return retval;
 }
index d3dae3419b99566c127f1682b29f39bb184bbdb1..a1339b13c578516c64b5a49f39bec55ba8eda989 100644 (file)
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
-       event->tstamp_running += tstamp - event->tstamp_stopped;
-
        perf_set_shadow_time(event, ctx, tstamp);
 
        perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
                goto out;
        }
 
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@ -3212,6 +3212,59 @@ static inline u64 perf_event_count(struct perf_event *event)
        return __perf_event_count(event);
 }
 
+/*
+ * NMI-safe method to read a local event, that is an event that
+ * is:
+ *   - either for the current task, or for this CPU
+ *   - does not have inherit set, for inherited task events
+ *     will not be local and we cannot read them atomically
+ *   - must not have a pmu::count method
+ */
+u64 perf_event_read_local(struct perf_event *event)
+{
+       unsigned long flags;
+       u64 val;
+
+       /*
+        * Disabling interrupts avoids all counter scheduling (context
+        * switches, timer based rotation and IPIs).
+        */
+       local_irq_save(flags);
+
+       /* If this is a per-task event, it must be for current */
+       WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
+                    event->hw.target != current);
+
+       /* If this is a per-CPU event, it must be for this CPU */
+       WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
+                    event->cpu != smp_processor_id());
+
+       /*
+        * It must not be an event with inherit set, we cannot read
+        * all child counters from atomic context.
+        */
+       WARN_ON_ONCE(event->attr.inherit);
+
+       /*
+        * It must not have a pmu::count method, those are not
+        * NMI safe.
+        */
+       WARN_ON_ONCE(event->pmu->count);
+
+       /*
+        * If the event is currently on this CPU, it's either a per-task event,
+        * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
+        * oncpu == -1).
+        */
+       if (event->oncpu == smp_processor_id())
+               event->pmu->read(event);
+
+       val = local64_read(&event->count);
+       local_irq_restore(flags);
+
+       return val;
+}
+
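
This is the primitive the new BPF helper is built on; a minimal sketch of that consumer, mirroring the fd-array storage above (shape assumed from this series):

    static u64 example_bpf_perf_event_read(struct bpf_map *map, u32 index)
    {
            struct bpf_array *array = container_of(map, struct bpf_array, map);
            struct perf_event *event;

            if (index >= array->map.max_entries)
                    return (u64)-E2BIG;

            event = READ_ONCE(array->ptrs[index]);
            if (!event)
                    return (u64)-ENOENT;

            /* safe in NMI/tracing context: no locks, current CPU only */
            return perf_event_read_local(event);
    }
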
 static u64 perf_event_read(struct perf_event *event)
 {
        /*
@@ -3958,28 +4011,21 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+struct period_event {
+       struct perf_event *event;
        u64 value;
+};
 
-       if (!is_sampling_event(event))
-               return -EINVAL;
-
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
-       if (!value)
-               return -EINVAL;
+static int __perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
 
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
@@ -3998,11 +4044,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
 
-unlock:
+       return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
 
-       return ret;
+       return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4740,12 +4828,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
        ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
 }
@@ -6124,7 +6220,7 @@ static int __perf_event_overflow(struct perf_event *event,
        else
                perf_event_output(event, data, regs);
 
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
@@ -8574,6 +8670,31 @@ void perf_event_delayed_put(struct task_struct *task)
                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
+struct perf_event *perf_event_get(unsigned int fd)
+{
+       int err;
+       struct fd f;
+       struct perf_event *event;
+
+       err = perf_fget_light(fd, &f);
+       if (err)
+               return ERR_PTR(err);
+
+       event = f.file->private_data;
+       atomic_long_inc(&event->refcount);
+       fdput(f);
+
+       return event;
+}
+
+const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+       if (!event)
+               return ERR_PTR(-EINVAL);
+
+       return &event->attr;
+}
+
 /*
  * inherit a event from parent task to child task:
  */
index b2be01b1aa9dcb7a70792fa381c264b229a106d0..c8aa3f75bc4db8ad7a2242aae6406bfd6f86f8c5 100644 (file)
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
                rb->aux_priv = NULL;
        }
 
-       for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               rb_free_aux_page(rb, pg);
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
 
-       kfree(rb->aux_pages);
-       rb->aux_nr_pages = 0;
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
 }
 
 void rb_free_aux(struct ring_buffer *rb)
index 10e489c448fe4e934e2c203ca2aa7a8d0679bb5e..fdea0bee7b5a4d5e2fcf43ee3b92e1a37dea6c71 100644 (file)
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
 {
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
 }
+EXPORT_SYMBOL_GPL(kthread_should_park);
 
 /**
  * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
 {
        __kthread_parkme(to_kthread(current));
 }
+EXPORT_SYMBOL_GPL(kthread_parkme);
 
 static int kthread(void *_create)
 {
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
        if (kthread)
                __kthread_unpark(k, kthread);
 }
+EXPORT_SYMBOL_GPL(kthread_unpark);
 
 /**
  * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(kthread_park);
 
 /**
  * kthread_stop - stop a thread created by kthread_create().
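With these exports, modules can use the full parking protocol. A hedged sketch of the worker side (worker() and its payload are hypothetical):

#include <linux/kthread.h>

/* Hypothetical module worker: cooperate with kthread_park() called by
 * a controller thread; kthread_parkme() sleeps until kthread_unpark(). */
static int worker(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		/* ... do one unit of work ... */
	}
	return 0;
}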
index 04ab18151cc8fa174a5859124ee07c144f33d505..df19ae4debd09c134d438b57e4ead7c71462c2b6 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
+       u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+       if (likely(lockval == _Q_LOCKED_VAL))
                return;
 
+       if (unlikely(lockval != _Q_SLOW_VAL)) {
+               if (debug_locks_silent)
+                       return;
+               WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+               return;
+       }
+
        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
index 4d2b82e610e2a48f429a700f0529d6dc5942700a..b86b7bf1be388d72fe92fb6038b4a67b4710df1f 100644 (file)
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
 }
 EXPORT_SYMBOL_GPL(find_symbol);
 
-/* Search for module by name: must hold module_mutex. */
+/*
+ * Search for module by name: must hold module_mutex (or have preemption
+ * disabled, for read-only access).
+ */
 static struct module *find_module_all(const char *name, size_t len,
                                      bool even_unformed)
 {
        struct module *mod;
 
-       module_assert_mutex();
+       module_assert_mutex_or_preempt();
 
        list_for_each_entry(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
 
 struct module *find_module(const char *name)
 {
+       module_assert_mutex();
        return find_module_all(name, strlen(name), false);
 }
 EXPORT_SYMBOL_GPL(find_module);
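The split assertion means read-only walkers inside module.c may substitute a preempt-disabled section (the RCU-sched read side that module unload synchronizes against) for module_mutex. A hedged sketch of such a caller (module_name_exists() is hypothetical):

static bool module_name_exists(const char *name)
{
	bool found;

	preempt_disable();	/* RCU-sched read side; no module_mutex needed */
	found = find_module_all(name, strlen(name), true) != NULL;
	preempt_enable();

	return found;
}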
index 836df8dac6ccd1230f21d20dc610429538cc59a1..0f6bbbe77b46c092d0de31e0c9eec8a0f17e6791 100644 (file)
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 #ifdef SEGV_BNDERR
-               err |= __put_user(from->si_lower, &to->si_lower);
-               err |= __put_user(from->si_upper, &to->si_upper);
+               if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+                       err |= __put_user(from->si_lower, &to->si_lower);
+                       err |= __put_user(from->si_upper, &to->si_upper);
+               }
 #endif
                break;
        case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
        int ret = copy_siginfo_from_user32(&info, uinfo);
        if (unlikely(ret))
                return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
 
        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
index 88a041adee901f5109c50e560cb5e13fc96f0357..ef9936df1b0491b5621501a495e5a20991aac4a7 100644 (file)
@@ -158,6 +158,35 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
        return &bpf_trace_printk_proto;
 }
 
+static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
+{
+       struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct perf_event *event;
+
+       if (unlikely(index >= array->map.max_entries))
+               return -E2BIG;
+
+       event = (struct perf_event *)array->ptrs[index];
+       if (!event)
+               return -ENOENT;
+
+       /*
+        * the return value does not tell us whether the read
+        * succeeded; that has to be judged elsewhere, e.g. by the
+        * eBPF program itself.
+        */
+       return perf_event_read_local(event);
+}
+
+const struct bpf_func_proto bpf_perf_event_read_proto = {
+       .func           = bpf_perf_event_read,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
@@ -183,6 +212,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return bpf_get_trace_printk_proto();
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
+       case BPF_FUNC_perf_event_read:
+               return &bpf_perf_event_read_proto;
        default:
                return NULL;
        }
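A hedged sketch of how a kprobe eBPF program could use the new helper, written in the style of the samples/bpf scaffolding (the map, section names and includes are illustrative, not part of this commit):

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,
};

SEC("kprobe/sys_write")
int on_write(struct pt_regs *ctx)
{
	/* read the counter user space stored at this CPU's slot; a
	 * value such as (u64)-ENOENT signals an empty slot */
	u64 count = bpf_perf_event_read(&counters, bpf_get_smp_processor_id());

	(void)count;	/* aggregate or log it elsewhere */
	return 0;
}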
index 3a2ef67db6c724f12b6d4ac0550ebb5fd58c5c58..278890dd104980514a7ba379f8be338a178100aa 100644 (file)
@@ -460,16 +460,6 @@ config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 config LRU_CACHE
        tristate
 
-config AVERAGE
-       bool "Averaging functions"
-       help
-         This option is provided for the case where no in-kernel-tree
-         modules require averaging functions, but a module built outside
-         the kernel tree does. Such modules that use library averaging
-         functions require Y here.
-
-         If unsure, say N.
-
 config CLZ_TAB
        bool
 
diff --git a/lib/average.c b/lib/average.c
deleted file mode 100644 (file)
index 114d1be..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * lib/average.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/export.h>
-#include <linux/average.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/log2.h>
-
-/**
- * DOC: Exponentially Weighted Moving Average (EWMA)
- *
- * These are generic functions for calculating Exponentially Weighted Moving
- * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
- * up internal representation of the average value to prevent rounding errors.
- * The factor for scaling up and the exponential weight (or decay rate) have to
- * be specified thru the init fuction. The structure should not be accessed
- * directly but only thru the helper functions.
- */
-
-/**
- * ewma_init() - Initialize EWMA parameters
- * @avg: Average structure
- * @factor: Factor to use for the scaled up internal value. The maximum value
- *     of averages can be ULONG_MAX/(factor*weight). For performance reasons
- *     factor has to be a power of 2.
- * @weight: Exponential weight, or decay rate. This defines how fast the
- *     influence of older values decreases. For performance reasons weight has
- *     to be a power of 2.
- *
- * Initialize the EWMA parameters for a given struct ewma @avg.
- */
-void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
-{
-       WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
-
-       avg->weight = ilog2(weight);
-       avg->factor = ilog2(factor);
-       avg->internal = 0;
-}
-EXPORT_SYMBOL(ewma_init);
-
-/**
- * ewma_add() - Exponentially weighted moving average (EWMA)
- * @avg: Average structure
- * @val: Current value
- *
- * Add a sample to the average.
- */
-struct ewma *ewma_add(struct ewma *avg, unsigned long val)
-{
-       unsigned long internal = ACCESS_ONCE(avg->internal);
-
-       ACCESS_ONCE(avg->internal) = internal ?
-               (((internal << avg->weight) - internal) +
-                       (val << avg->factor)) >> avg->weight :
-               (val << avg->factor);
-       return avg;
-}
-EXPORT_SYMBOL(ewma_add);
index df30632f0bef9ec1c36a48d83a6eb87cd18ee405..ff19f66d3f7fbd635a44cc03614b5fbe4485cc56 100644 (file)
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
        unsigned long align_mask = 0;
 
        if (align_order > 0)
-               align_mask = 0xffffffffffffffffl >> (64 - align_order);
+               align_mask = ~0ul >> (BITS_PER_LONG - align_order);
 
        /* Sanity check */
        if (unlikely(npages == 0)) {
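The replacement expression builds the same mask without assuming a 64-bit unsigned long. A hedged illustration (low_bits_mask() is a hypothetical helper, valid only for 0 < order <= BITS_PER_LONG, matching the align_order > 0 guard above):

static inline unsigned long low_bits_mask(unsigned int order)
{
	/* mask with the low `order` bits set on 32- and 64-bit alike;
	 * for order = 3 this yields 0x7. The old 64-bit literal was
	 * only correct when unsigned long is 64 bits wide. */
	return ~0ul >> (BITS_PER_LONG - order);
}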
index 3afddf2026c983d279fe822a0c42b7f1a65f193a..d1377390b3adda70bafe7464186e034d1fd7bd01 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/random.h>
+#include <linux/highmem.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS   3
@@ -56,6 +57,7 @@
 /* Flags that can be passed to test cases */
 #define FLAG_NO_DATA           BIT(0)
 #define FLAG_EXPECTED_FAIL     BIT(1)
+#define FLAG_SKB_FRAG          BIT(2)
 
 enum {
        CLASSIC  = BIT(6),      /* Old BPF instructions only. */
@@ -81,6 +83,7 @@ struct bpf_test {
                __u32 result;
        } test[MAX_SUBTESTS];
        int (*fill_helper)(struct bpf_test *self);
+       __u8 frag_data[MAX_DATA];
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -4490,6 +4493,602 @@ static struct bpf_test tests[] = {
                { { 1, 0xbef } },
                .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
        },
+       /*
+        * LD_IND / LD_ABS on fragmented SKBs
+        */
+       {
+               "LD_IND byte frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { },
+               { {0x40, 0x42} },
+               .frag_data = {
+                       0x42, 0x00, 0x00, 0x00,
+                       0x43, 0x44, 0x00, 0x00,
+                       0x21, 0x07, 0x19, 0x83,
+               },
+       },
+       {
+               "LD_IND halfword frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { },
+               { {0x40, 0x4344} },
+               .frag_data = {
+                       0x42, 0x00, 0x00, 0x00,
+                       0x43, 0x44, 0x00, 0x00,
+                       0x21, 0x07, 0x19, 0x83,
+               },
+       },
+       {
+               "LD_IND word frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { },
+               { {0x40, 0x21071983} },
+               .frag_data = {
+                       0x42, 0x00, 0x00, 0x00,
+                       0x43, 0x44, 0x00, 0x00,
+                       0x21, 0x07, 0x19, 0x83,
+               },
+       },
+       {
+               "LD_IND halfword mixed head/frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { [0x3e] = 0x25, [0x3f] = 0x05, },
+               { {0x40, 0x0519} },
+               .frag_data = { 0x19, 0x82 },
+       },
+       {
+               "LD_IND word mixed head/frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { [0x3e] = 0x25, [0x3f] = 0x05, },
+               { {0x40, 0x25051982} },
+               .frag_data = { 0x19, 0x82 },
+       },
+       {
+               "LD_ABS byte frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { },
+               { {0x40, 0x42} },
+               .frag_data = {
+                       0x42, 0x00, 0x00, 0x00,
+                       0x43, 0x44, 0x00, 0x00,
+                       0x21, 0x07, 0x19, 0x83,
+               },
+       },
+       {
+               "LD_ABS halfword frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { },
+               { {0x40, 0x4344} },
+               .frag_data = {
+                       0x42, 0x00, 0x00, 0x00,
+                       0x43, 0x44, 0x00, 0x00,
+                       0x21, 0x07, 0x19, 0x83,
+               },
+       },
+       {
+               "LD_ABS word frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { },
+               { {0x40, 0x21071983} },
+               .frag_data = {
+                       0x42, 0x00, 0x00, 0x00,
+                       0x43, 0x44, 0x00, 0x00,
+                       0x21, 0x07, 0x19, 0x83,
+               },
+       },
+       {
+               "LD_ABS halfword mixed head/frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { [0x3e] = 0x25, [0x3f] = 0x05, },
+               { {0x40, 0x0519} },
+               .frag_data = { 0x19, 0x82 },
+       },
+       {
+               "LD_ABS word mixed head/frag",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_SKB_FRAG,
+               { [0x3e] = 0x25, [0x3f] = 0x05, },
+               { {0x40, 0x25051982} },
+               .frag_data = { 0x19, 0x82 },
+       },
+       /*
+        * LD_IND / LD_ABS on non-fragmented SKBs
+        */
+       {
+               /*
+                * this tests that the JIT/interpreter correctly resets X
+                * before using it in an LD_IND instruction.
+                */
+               "LD_IND byte default X",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x1] = 0x42 },
+               { {0x40, 0x42 } },
+       },
+       {
+               "LD_IND byte positive offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x82 } },
+       },
+       {
+               "LD_IND byte negative offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               { [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+               { {0x40, 0x05 } },
+       },
+       {
+               "LD_IND halfword positive offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+               },
+               { {0x40, 0xdd88 } },
+       },
+       {
+               "LD_IND halfword negative offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+               },
+               { {0x40, 0xbb66 } },
+       },
+       {
+               "LD_IND halfword unaligned",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+               },
+               { {0x40, 0x66cc } },
+       },
+       {
+               "LD_IND word positive offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xee99ffaa } },
+       },
+       {
+               "LD_IND word negative offset",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xaa55bb66 } },
+       },
+       {
+               "LD_IND word unaligned (addr & 3 == 2)",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xbb66cc77 } },
+       },
+       {
+               "LD_IND word unaligned (addr & 3 == 1)",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0x55bb66cc } },
+       },
+       {
+               "LD_IND word unaligned (addr & 3 == 3)",
+               .u.insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+                       BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0x66cc77dd } },
+       },
+       {
+               "LD_ABS byte",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xcc } },
+       },
+       {
+               "LD_ABS halfword",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xdd88 } },
+       },
+       {
+               "LD_ABS halfword unaligned",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0x99ff } },
+       },
+       {
+               "LD_ABS word",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xaa55bb66 } },
+       },
+       {
+               "LD_ABS word unaligned (addr & 3 == 2)",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0xdd88ee99 } },
+       },
+       {
+               "LD_ABS word unaligned (addr & 3 == 1)",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0x77dd88ee } },
+       },
+       {
+               "LD_ABS word unaligned (addr & 3 == 3)",
+               .u.insns = {
+                       BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC,
+               {
+                       [0x1c] = 0xaa, [0x1d] = 0x55,
+                       [0x1e] = 0xbb, [0x1f] = 0x66,
+                       [0x20] = 0xcc, [0x21] = 0x77,
+                       [0x22] = 0xdd, [0x23] = 0x88,
+                       [0x24] = 0xee, [0x25] = 0x99,
+                       [0x26] = 0xff, [0x27] = 0xaa,
+               },
+               { {0x40, 0x88ee99ff } },
+       },
+       /*
+        * verify that the interpreter or JIT correctly sets A and X
+        * to 0.
+        */
+       {
+               "ADD default X",
+               .u.insns = {
+                       /*
+                        * A = 0x42
+                        * A = A + X
+                        * ret A
+                        */
+                       BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x42 } },
+       },
+       {
+               "ADD default A",
+               .u.insns = {
+                       /*
+                        * A = A + 0x42
+                        * ret A
+                        */
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x42 } },
+       },
+       {
+               "SUB default X",
+               .u.insns = {
+                       /*
+                        * A = 0x66
+                        * A = A - X
+                        * ret A
+                        */
+                       BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x66 } },
+       },
+       {
+               "SUB default A",
+               .u.insns = {
+                       /*
+                        * A = A - -0x66
+                        * ret A
+                        */
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x66 } },
+       },
+       {
+               "MUL default X",
+               .u.insns = {
+                       /*
+                        * A = 0x42
+                        * A = A * X
+                        * ret A
+                        */
+                       BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+                       BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x0 } },
+       },
+       {
+               "MUL default A",
+               .u.insns = {
+                       /*
+                        * A = A * 0x66
+                        * ret A
+                        */
+                       BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x0 } },
+       },
+       {
+               "DIV default X",
+               .u.insns = {
+                       /*
+                        * A = 0x42
+                        * A = A / X ; this halts the filter execution if X is 0
+                        * ret 0x42
+                        */
+                       BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0x42),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x0 } },
+       },
+       {
+               "DIV default A",
+               .u.insns = {
+                       /*
+                        * A = A / 1
+                        * ret A
+                        */
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+                       BPF_STMT(BPF_RET | BPF_A, 0x0),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x0 } },
+       },
+       {
+               "JMP EQ default A",
+               .u.insns = {
+                       /*
+                        * cmp A, 0x0, 0, 1
+                        * ret 0x42
+                        * ret 0x66
+                        */
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 0x42),
+                       BPF_STMT(BPF_RET | BPF_K, 0x66),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x42 } },
+       },
+       {
+               "JMP EQ default X",
+               .u.insns = {
+                       /*
+                        * A = 0x0
+                        * cmp A, X, 0, 1
+                        * ret 0x42
+                        * ret 0x66
+                        */
+                       BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 0x42),
+                       BPF_STMT(BPF_RET | BPF_K, 0x66),
+               },
+               CLASSIC | FLAG_NO_DATA,
+               {},
+               { {0x1, 0x42 } },
+       },
 };
 
 static struct net_device dev;
@@ -4525,6 +5124,9 @@ static struct sk_buff *populate_skb(char *buf, int size)
 
 static void *generate_test_data(struct bpf_test *test, int sub)
 {
+       struct sk_buff *skb;
+       struct page *page;
+
        if (test->aux & FLAG_NO_DATA)
                return NULL;
 
@@ -4532,7 +5134,38 @@ static void *generate_test_data(struct bpf_test *test, int sub)
         * subtests generate skbs of different sizes based on
         * the same data.
         */
-       return populate_skb(test->data, test->test[sub].data_size);
+       skb = populate_skb(test->data, test->test[sub].data_size);
+       if (!skb)
+               return NULL;
+
+       if (test->aux & FLAG_SKB_FRAG) {
+               /*
+                * when the test requires a fragmented skb, add a
+                * single fragment to the skb, filled with
+                * test->frag_data.
+                */
+               void *ptr;
+
+               page = alloc_page(GFP_KERNEL);
+
+               if (!page)
+                       goto err_kfree_skb;
+
+               ptr = kmap(page);
+               if (!ptr)
+                       goto err_free_page;
+               memcpy(ptr, test->frag_data, MAX_DATA);
+               kunmap(page);
+               skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
+       }
+
+       return skb;
+
+err_free_page:
+       __free_page(page);
+err_kfree_skb:
+       kfree_skb(skb);
+       return NULL;
 }
 
 static void release_test_data(const struct bpf_test *test, void *data)
@@ -4672,6 +5305,11 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
                        break;
 
                data = generate_test_data(test, i);
+               if (!data && !(test->aux & FLAG_NO_DATA)) {
+                       pr_cont("data generation failed ");
+                       err_cnt++;
+                       break;
+               }
                ret = __run_one(fp, data, runs, &duration);
                release_test_data(test, data);
 
@@ -4687,10 +5325,73 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
        return err_cnt;
 }
 
+static char test_name[64];
+module_param_string(test_name, test_name, sizeof(test_name), 0);
+
+static int test_id = -1;
+module_param(test_id, int, 0);
+
+static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+module_param_array(test_range, int, NULL, 0);
+
+static __init int find_test_index(const char *test_name)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tests); i++) {
+               if (!strcmp(tests[i].descr, test_name))
+                       return i;
+       }
+       return -1;
+}
+
 static __init int prepare_bpf_tests(void)
 {
        int i;
 
+       if (test_id >= 0) {
+               /*
+                * if a test_id was specified, use test_range to
+                * cover only that test.
+                */
+               if (test_id >= ARRAY_SIZE(tests)) {
+                       pr_err("test_bpf: invalid test_id specified.\n");
+                       return -EINVAL;
+               }
+
+               test_range[0] = test_id;
+               test_range[1] = test_id;
+       } else if (*test_name) {
+               /*
+                * if a test_name was specified, find it and set up
+                * test_range to cover only that test.
+                */
+               int idx = find_test_index(test_name);
+
+               if (idx < 0) {
+                       pr_err("test_bpf: no test named '%s' found.\n",
+                              test_name);
+                       return -EINVAL;
+               }
+               test_range[0] = idx;
+               test_range[1] = idx;
+       } else {
+               /*
+                * check that the supplied test_range is valid.
+                */
+               if (test_range[0] >= ARRAY_SIZE(tests) ||
+                   test_range[1] >= ARRAY_SIZE(tests) ||
+                   test_range[0] < 0 || test_range[1] < 0) {
+                       pr_err("test_bpf: test_range is out of bound.\n");
+                       return -EINVAL;
+               }
+
+               if (test_range[1] < test_range[0]) {
+                       pr_err("test_bpf: test_range is ending before it starts.\n");
+                       return -EINVAL;
+               }
+       }
+
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                if (tests[i].fill_helper &&
                    tests[i].fill_helper(&tests[i]) < 0)
@@ -4710,6 +5411,11 @@ static __init void destroy_bpf_tests(void)
        }
 }
 
+static bool exclude_test(int test_id)
+{
+       return test_id < test_range[0] || test_id > test_range[1];
+}
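Taken together, test_id, test_name and test_range let a single case or a slice of the suite be run in isolation, e.g. by loading the module with test_name="LD_IND byte frag" or test_range=0,15 (illustrative invocations; the suite must be built as a module for parameters to be passed this way).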
+
 static __init int test_bpf(void)
 {
        int i, err_cnt = 0, pass_cnt = 0;
@@ -4719,6 +5425,9 @@ static __init int test_bpf(void)
                struct bpf_prog *fp;
                int err;
 
+               if (exclude_test(i))
+                       continue;
+
                pr_info("#%d %s ", i, tests[i].descr);
 
                fp = generate_filter(i, &err);
index 9af7cefb195d3d3537366396d0af31180292ece9..8c1ad1ced72cc1deaed9439a789ec5e672e65ccf 100644 (file)
 #include <linux/init.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
+#include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #define MAX_ENTRIES    1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -45,11 +48,21 @@ static int size = 8;
 module_param(size, int, 0);
 MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
 
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
 struct test_obj {
        int                     value;
        struct rhash_head       node;
 };
 
+struct thread_data {
+       int id;
+       struct task_struct *task;
+       struct test_obj *objs;
+};
+
 static struct test_obj array[MAX_ENTRIES];
 
 static struct rhashtable_params test_rht_params = {
@@ -60,6 +73,9 @@ static struct rhashtable_params test_rht_params = {
        .nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct semaphore prestart_sem;
+static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
        unsigned int i;
@@ -200,10 +216,97 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 
 static struct rhashtable ht;
 
+static int thread_lookup_test(struct thread_data *tdata)
+{
+       int i, err = 0;
+
+       for (i = 0; i < entries; i++) {
+               struct test_obj *obj;
+               int key = (tdata->id << 16) | i;
+
+               obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+               if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
+                       pr_err("  found unexpected object %d\n", key);
+                       err++;
+               } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
+                       pr_err("  object %d not found!\n", key);
+                       err++;
+               } else if (obj && (obj->value != key)) {
+                       pr_err("  wrong object returned (got %d, expected %d)\n",
+                              obj->value, key);
+                       err++;
+               }
+       }
+       return err;
+}
+
+static int threadfunc(void *data)
+{
+       int i, step, err = 0, insert_fails = 0;
+       struct thread_data *tdata = data;
+
+       up(&prestart_sem);
+       if (down_interruptible(&startup_sem))
+               pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
+
+       for (i = 0; i < entries; i++) {
+               tdata->objs[i].value = (tdata->id << 16) | i;
+               err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
+                                            test_rht_params);
+               if (err == -ENOMEM || err == -EBUSY) {
+                       tdata->objs[i].value = TEST_INSERT_FAIL;
+                       insert_fails++;
+               } else if (err) {
+                       pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
+                              tdata->id);
+                       goto out;
+               }
+       }
+       if (insert_fails)
+               pr_info("  thread[%d]: %d insert failures\n",
+                       tdata->id, insert_fails);
+
+       err = thread_lookup_test(tdata);
+       if (err) {
+               pr_err("  thread[%d]: rhashtable_lookup_test failed\n",
+                      tdata->id);
+               goto out;
+       }
+
+       for (step = 10; step > 0; step--) {
+               for (i = 0; i < entries; i += step) {
+                       if (tdata->objs[i].value == TEST_INSERT_FAIL)
+                               continue;
+                       err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+                                                    test_rht_params);
+                       if (err) {
+                               pr_err("  thread[%d]: rhashtable_remove_fast failed\n",
+                                      tdata->id);
+                               goto out;
+                       }
+                       tdata->objs[i].value = TEST_INSERT_FAIL;
+               }
+               err = thread_lookup_test(tdata);
+               if (err) {
+                       pr_err("  thread[%d]: rhashtable_lookup_test (2) failed\n",
+                              tdata->id);
+                       goto out;
+               }
+       }
+out:
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
+       return err;
+}
+
 static int __init test_rht_init(void)
 {
-       int i, err;
+       int i, err, started_threads = 0, failed_threads = 0;
        u64 total_time = 0;
+       struct thread_data *tdata;
+       struct test_obj *objs;
 
        entries = min(entries, MAX_ENTRIES);
 
@@ -239,6 +342,57 @@ static int __init test_rht_init(void)
        do_div(total_time, runs);
        pr_info("Average test time: %llu\n", total_time);
 
+       if (!tcount)
+               return 0;
+
+       pr_info("Testing concurrent rhashtable access from %d threads\n",
+               tcount);
+       sema_init(&prestart_sem, 1 - tcount);
+       tdata = vzalloc(tcount * sizeof(struct thread_data));
+       if (!tdata)
+               return -ENOMEM;
+       objs  = vzalloc(tcount * entries * sizeof(struct test_obj));
+       if (!objs) {
+               vfree(tdata);
+               return -ENOMEM;
+       }
+
+       err = rhashtable_init(&ht, &test_rht_params);
+       if (err < 0) {
+               pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+                       err);
+               vfree(tdata);
+               vfree(objs);
+               return -EINVAL;
+       }
+       for (i = 0; i < tcount; i++) {
+               tdata[i].id = i;
+               tdata[i].objs = objs + i * entries;
+               tdata[i].task = kthread_run(threadfunc, &tdata[i],
+                                           "rhashtable_thrad[%d]", i);
+               if (IS_ERR(tdata[i].task))
+                       pr_err(" kthread_run failed for thread %d\n", i);
+               else
+                       started_threads++;
+       }
+       if (down_interruptible(&prestart_sem))
+               pr_err("  down interruptible failed\n");
+       for (i = 0; i < tcount; i++)
+               up(&startup_sem);
+       for (i = 0; i < tcount; i++) {
+               if (IS_ERR(tdata[i].task))
+                       continue;
+               if ((err = kthread_stop(tdata[i].task))) {
+                       pr_warn("Test failed: thread %d returned: %d\n",
+                               i, err);
+                       failed_threads++;
+               }
+       }
+       pr_info("Started %d threads, %d failed\n",
+               started_threads, failed_threads);
+       rhashtable_destroy(&ht);
+       vfree(tdata);
+       vfree(objs);
        return 0;
 }
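The two semaphores above implement a start barrier: prestart_sem is initialised to 1 - tcount so the controller's down() only completes once every worker has checked in, after which startup_sem is upped tcount times to release them together. A hedged, stripped-down sketch of the same pattern (names are illustrative):

#include <linux/semaphore.h>

static struct semaphore ready;	/* workers -> controller */
static struct semaphore go;	/* controller -> workers */

/* hypothetical worker side: announce readiness, wait for the gun */
static void worker_sync(void)
{
	up(&ready);
	down(&go);
}

/* hypothetical controller side: wait for n check-ins, release them all */
static void controller_sync(int n)
{
	int i;

	sema_init(&ready, 1 - n);	/* down() succeeds after n ups */
	sema_init(&go, 0);
	/* ... spawn the n workers here, each calling worker_sync() ... */
	down(&ready);
	for (i = 0; i < n; i++)
		up(&go);
}

The idle loop at the end of threadfunc() serves a related purpose: the workers park themselves in TASK_INTERRUPTIBLE until kthread_stop() is called, so their return value can still be collected by the controller.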
 
index 1132d733556dbc330d32eda5460f55e6e067b627..17c75a4246c8bbab8b56fe4d562cd85ea670a21f 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned cma_area_count;
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
 {
        return cma->count >> cma->order_per_bit;
 }
index c107094f79bae9ee895bd6bf30976d900f16c141..097c7a4bfbd9f13f4845acae80d73aa7b0e66fb2 100644 (file)
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
                /* after clearing PageTail the gup refcount can be released */
                smp_mb__after_atomic();
 
-               /*
-                * retain hwpoison flag of the poisoned tail page:
-                *   fix for the unsuitable process killed on Guest Machine(KVM)
-                *   by the memory-failure.
-                */
-               page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
+               page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
                page_tail->flags |= (page->flags &
                                     ((1L << PG_referenced) |
                                      (1L << PG_swapbacked) |
index 6c513a63ea84c3c7ffd41201b7a419ff7b6dfd5d..7b28e9cdf1c7686428fe49802fced44088043555 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains shadow memory manipulation code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index 680ceedf810ab4c9cd08c9929f5d445de9f5aa6a..e07c94fbd0ac5a141ecf95ab7d39d046fea13e67 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains error reporting code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index c53543d892828e75796239d6ce36afa90203085b..1f4446a90cef07c67ee1082b83f0ca87ebfefea1 100644 (file)
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
         * directly for tail pages.
         */
        if (PageTransHuge(head)) {
+               /*
+                * Non-anonymous thp exists only at allocation/free time. We
+                * can't handle such a case correctly, so let's give up.
+                * This should be better than triggering BUG_ON when kernel
+                * tries to touch the "partially handled" page.
+                */
+               if (!PageAnon(head)) {
+                       pr_err("MCE: %#lx: non anonymous thp\n",
+                               page_to_pfn(page));
+                       return 0;
+               }
+
                if (get_page_unless_zero(head)) {
                        if (PageTail(page))
                                get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        }
 
        if (!PageHuge(p) && PageTransHuge(hpage)) {
-               if (!PageAnon(hpage)) {
-                       pr_err("MCE: %#lx: non anonymous thp\n", pfn);
-                       if (TestClearPageHWPoison(p))
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                       put_page(p);
-                       if (p != hpage)
-                               put_page(hpage);
-                       return -EBUSY;
-               }
-               if (unlikely(split_huge_page(hpage))) {
-                       pr_err("MCE: %#lx: thp split failed\n", pfn);
+               if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+                       if (!PageAnon(hpage))
+                               pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+                       else
+                               pr_err("MCE: %#lx: thp split failed\n", pfn);
                        if (TestClearPageHWPoison(p))
                                atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
                atomic_long_sub(nr_pages, &num_poisoned_pages);
+               unlock_page(hpage);
                put_page(hpage);
-               res = 0;
-               goto out;
+               return 0;
        }
        if (hwpoison_filter(p)) {
                if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                 */
                ret = __get_any_page(page, pfn, 0);
                if (!PageLRU(page)) {
+                       /* Drop page reference which is from __get_any_page() */
+                       put_page(page);
                        pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                        return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        ret = isolate_huge_page(hpage, &pagelist);
-       if (ret) {
-               /*
-                * get_any_page() and isolate_huge_page() takes a refcount each,
-                * so need to drop one here.
-                */
-               put_page(hpage);
-       } else {
+       /*
+        * get_any_page() and isolate_huge_page() take a refcount each,
+        * so need to drop one here.
+        */
+       put_page(hpage);
+       if (!ret) {
                pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
                return -EBUSY;
        }
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
                inc_zone_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
+               if (!TestSetPageHWPoison(page))
+                       atomic_long_inc(&num_poisoned_pages);
                ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                                        MIGRATE_SYNC, MR_MEMORY_FAILURE);
                if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
                                pfn, ret, page->flags);
                        if (ret > 0)
                                ret = -EIO;
-               } else {
-                       SetPageHWPoison(page);
-                       atomic_long_inc(&num_poisoned_pages);
+                       if (TestClearPageHWPoison(page))
+                               atomic_long_dec(&num_poisoned_pages);
                }
        } else {
                pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
index 26fbba7d888f887c3383c1bb5829cb4f204e2040..6da82bcb0a8b66b7326c1a021a7eac3b476cd85e 100644 (file)
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
-       unsigned long flags;
+       unsigned long flags, pfn;
        int ret;
 
        zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
+
+       /* online_page_range is called later and expects pages reserved */
+       for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
+               if (!pfn_valid(pfn))
+                       continue;
+
+               SetPageReserved(pfn_to_page(pfn));
+       }
        return 0;
 }
 
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 
        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");
+       memblock_add_node(start, size, nid);
 
        goto out;
 
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 
        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
+       memblock_free(start, size);
+       memblock_remove(start, size);
 
        arch_remove_memory(start, size);
 
index ee401e4e5ef187c92247d03dd6d2ea0893092d1c..eb4267107d1fee9fa2a55e4076c014500e3b1edb 100644 (file)
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
        /* Establish migration ptes or remove ptes */
        if (page_mapped(page)) {
                try_to_unmap(page,
-                       TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+                       TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+                       TTU_IGNORE_HWPOISON);
                page_was_mapped = 1;
        }
 
@@ -950,7 +951,10 @@ out:
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               if (reason != MR_MEMORY_FAILURE)
+               /* Soft-offlined page shouldn't go through lru cache list */
+               if (reason == MR_MEMORY_FAILURE)
+                       put_page(page);
+               else
                        putback_lru_page(page);
        }
 
index 22cddd3e5de8433952e99438d3260ae9ff20bd8d..5cccc127ef81f1d64ca46f9ce9ad50f519d4ea9f 100644 (file)
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
  */
 void __init page_writeback_init(void)
 {
+       BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
+
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
-
-       BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
 }
 
 /**
index ef19f22b2b7de1728fb4ed8d6451d29bb993a928..df959b7d608518edf9ab5c577e6b19afa8d88ed9 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/interrupt.h>
-#include <linux/rwsem.h>
 #include <linux/pagemap.h>
 #include <linux/jiffies.h>
 #include <linux/bootmem.h>
@@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
 
 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
        defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-/* Only safe to use early in boot when initialisation is single-threaded */
+
 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
 
 int __meminit early_pfn_to_nid(unsigned long pfn)
 {
+       static DEFINE_SPINLOCK(early_pfn_lock);
        int nid;
 
-       /* The system will behave unpredictably otherwise */
-       BUG_ON(system_state != SYSTEM_BOOTING);
-
+       spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
-       if (nid >= 0)
-               return nid;
-       /* just returns 0 */
-       return 0;
+       if (nid < 0)
+               nid = 0;
+       spin_unlock(&early_pfn_lock);
+
+       return nid;
 }
 #endif
 
@@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
                __free_pages_boot_core(page, pfn, 0);
 }
 
-static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
+/* Completion tracking for deferred_init_memmap() threads */
+static atomic_t pgdat_init_n_undone __initdata;
+static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
+
+static inline void __init pgdat_init_report_one_done(void)
+{
+       if (atomic_dec_and_test(&pgdat_init_n_undone))
+               complete(&pgdat_init_all_done_comp);
+}
 
 /* Initialise remaining memory on a node */
 static int __init deferred_init_memmap(void *data)
@@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
        if (first_init_pfn == ULONG_MAX) {
-               up_read(&pgdat_init_rwsem);
+               pgdat_init_report_one_done();
                return 0;
        }
 
@@ -1177,7 +1184,8 @@ free_range:
 
        pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
                                        jiffies_to_msecs(jiffies - start));
-       up_read(&pgdat_init_rwsem);
+
+       pgdat_init_report_one_done();
        return 0;
 }
 
@@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void)
 {
        int nid;
 
+       /* There will be num_node_state(N_MEMORY) threads */
+       atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
        for_each_node_state(nid, N_MEMORY) {
-               down_read(&pgdat_init_rwsem);
                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
        }
 
        /* Block until all are initialised */
-       down_write(&pgdat_init_rwsem);
-       up_write(&pgdat_init_rwsem);
+       wait_for_completion(&pgdat_init_all_done_comp);
+
+       /* Reinit limits that are based on free pages after the kernel is up */
+       files_maxfiles_init();
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
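
The hunks above replace the pgdat_init_rwsem trick with a counted
completion: the launcher seeds an atomic counter with the number of worker
threads and each worker signals exactly once when it finishes. A minimal
sketch of the idiom, assuming a hypothetical worker_fn in place of
deferred_init_memmap() and eliding the surrounding kernel plumbing:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/kthread.h>

static atomic_t n_undone;
static DECLARE_COMPLETION(all_done);

/* every worker must call this exactly once, on all exit paths */
static void report_one_done(void)
{
	/* the last worker to finish wakes the single waiter */
	if (atomic_dec_and_test(&n_undone))
		complete(&all_done);
}

static void run_and_wait(int nr_workers, int (*worker_fn)(void *))
{
	int i;

	atomic_set(&n_undone, nr_workers);
	for (i = 0; i < nr_workers; i++)
		kthread_run(worker_fn, NULL, "worker%d", i);

	/* sleeps until every worker has reported in */
	wait_for_completion(&all_done);
}

Unlike a rwsem, a completion may be completed by a different task than the
one that waits on it, which fits this cross-thread handshake.
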
 
@@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page)
                bad_reason = "non-NULL mapping";
        if (unlikely(atomic_read(&page->_count) != 0))
                bad_reason = "nonzero _count";
+       if (unlikely(page->flags & __PG_HWPOISON)) {
+               bad_reason = "HWPoisoned (hardware-corrupted)";
+               bad_flags = __PG_HWPOISON;
+       }
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -5045,6 +5060,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 {
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hotadding a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        /* Get the start and end of the zone */
        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5108,6 +5127,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hotadding a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
index 4caf8ed24d6586e32ab910f28f945c01cef6373b..dbe0c1e8349c72ac569a58289da702a841104951 100644 (file)
@@ -3363,8 +3363,8 @@ put_path:
  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
  *     kernel internal.  There will be NO LSM permission checks against the
  *     underlying inode.  So users of this interface must do LSM checks at a
- *     higher layer.  The one user is the big_key implementation.  LSM checks
- *     are provided at the key level rather than the inode level.
+ *     higher layer.  The users are the big_key and shm implementations.  LSM
+ *     checks are provided at the key or shm level rather than the inode.
  * @name: name for dentry (to be seen in /proc/<pid>/maps
  * @size: size to be set for the file
  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
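
For reference, a sketch of how a kernel-internal user such as big_key
consumes this interface; the function and its error handling here are
illustrative, not taken from this commit:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/shmem_fs.h>

static int example_alloc_backing(loff_t datalen)
{
	struct file *file;

	/* unlinked tmpfs file; no LSM checks happen on the inode, so
	 * the caller must enforce its own policy at a higher layer
	 */
	file = shmem_kernel_file_setup("big_key", datalen, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... populate and read the file ... */

	fput(file);
	return 0;
}
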
index 3e5f8f29c28640e44af5f5f9d1c3553986064588..86831105a09f44ffae37c074a6e5587c5b7056ce 100644 (file)
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB)
 
-#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-               SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
index e61445dce04e3cc83e9704e84f3d5bf9074b31db..8286938c70ded6b82d4268174c92669a90eeb674 100644 (file)
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 *    caller can stall after page list has been processed.
                 *
                 * 2) Global or new memcg reclaim encounters a page that is
-                *    not marked for immediate reclaim or the caller does not
-                *    have __GFP_IO. In this case mark the page for immediate
+                *    not marked for immediate reclaim, or the caller does not
+                *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
+                *    not to fs). In this case mark the page for immediate
                 *    reclaim and continue scanning.
                 *
-                *    __GFP_IO is checked  because a loop driver thread might
+                *    Require may_enter_fs because we would wait on fs, which
+                *    may not have submitted IO yet. And the loop driver might
                 *    enter reclaim, and deadlock if it waits on a page for
                 *    which it is needed to do the write (loop masks off
                 *    __GFP_IO|__GFP_FS for this reason); but more thought
                 *    would probably show more reasons.
                 *
-                *    Don't require __GFP_FS, since we're not going into the
-                *    FS, just waiting on its writeback completion. Worryingly,
-                *    ext4 gfs2 and xfs allocate pages with
-                *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-                *    may_enter_fs here is liable to OOM on them.
-                *
                 * 3) Legacy memcg encounters a page that is not already marked
                 *    PageReclaim. memcg does not have any dirty pages
                 *    throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                        /* Case 2 above */
                        } else if (sane_reclaim(sc) ||
-                           !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+                           !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
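
For context, may_enter_fs in shrink_page_list() is computed from the
caller's gfp mask earlier in the function, roughly:

	may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
		(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

so the rewritten case 2 only waits when the allocation could legitimately
have entered the fs, or the io layer for swap-backed pages.
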
index eb8baa72adc8ab2c84c390a54bcfa4db429d4c0d..c6ffc55ee0d7adda3d0dfb0975061d72fe7af6ac 100644 (file)
@@ -1,6 +1,6 @@
 obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
 
 #rfc6282 nhcs
 obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
new file mode 100644 (file)
index 0000000..ae1896f
--- /dev/null
@@ -0,0 +1,40 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
+{
+       lowpan_priv(dev)->lltype = lltype;
+}
+EXPORT_SYMBOL(lowpan_netdev_setup);
+
+static int __init lowpan_module_init(void)
+{
+       request_module_nowait("ipv6");
+
+       request_module_nowait("nhc_dest");
+       request_module_nowait("nhc_fragment");
+       request_module_nowait("nhc_hop");
+       request_module_nowait("nhc_ipv6");
+       request_module_nowait("nhc_mobility");
+       request_module_nowait("nhc_routing");
+       request_module_nowait("nhc_udp");
+
+       return 0;
+}
+module_init(lowpan_module_init);
+
+MODULE_LICENSE("GPL");
index 9055d7b9d1129d69e34f8ed255922b2e340fcbb1..1e0071fdcf72a036d6697f2e453ba303b69abbe0 100644 (file)
@@ -48,7 +48,6 @@
 
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
-#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
@@ -284,7 +283,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
                if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
                        return -EINVAL;
 
-               hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+               hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
                memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
                skb_pull(skb, 2);
                break;
@@ -610,21 +609,3 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        return 0;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
-
-static int __init lowpan_module_init(void)
-{
-       request_module_nowait("ipv6");
-
-       request_module_nowait("nhc_dest");
-       request_module_nowait("nhc_fragment");
-       request_module_nowait("nhc_hop");
-       request_module_nowait("nhc_ipv6");
-       request_module_nowait("nhc_mobility");
-       request_module_nowait("nhc_routing");
-       request_module_nowait("nhc_udp");
-
-       return 0;
-}
-module_init(lowpan_module_init);
-
-MODULE_LICENSE("GPL");
index 01d7ba840df8dbf48b07e3c8697bb7c11f424a8d..fded86508117dad4d81aad327b287aee991a55be 100644 (file)
@@ -791,10 +791,9 @@ void vlan_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
-       dev->priv_flags         |= IFF_802_1Q_VLAN;
+       dev->priv_flags         |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
        dev->priv_flags         &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
-       dev->tx_queue_len       = 0;
 
        dev->netdev_ops         = &vlan_netdev_ops;
        dev->destructor         = vlan_dev_free;
index fb54e6aed096edd267fc211e4cd2a0139fe71f8a..cc7d87d6498785535017ddcdbbce6da8e3f8679c 100644 (file)
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -453,7 +454,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
        int j;
 
        /* check if orig node candidate is running DAT */
-       if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+       if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
                goto out;
 
        /* Check if this node has already been selected... */
@@ -713,9 +714,9 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                                           uint16_t tvlv_value_len)
 {
        if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+               clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
        else
-               orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+               set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
 }
 
 /**
@@ -1138,6 +1139,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT. False if the
+ * packet has to be delivered to the interface.
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1149,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        uint16_t type;
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
-       bool ret = false;
+       bool dropped = false;
        unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1178,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        /* if this REPLY is directed to a client of mine, let's deliver the
         * packet to the interface
         */
-       ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+       dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+       /* if this REPLY is sent on behalf of a client of mine, let's drop the
+        * packet because the client will reply by itself
+        */
+       dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
 out:
-       if (ret)
+       if (dropped)
                kfree_skb(skb);
-       /* if ret == false -> packet has to be delivered to the interface */
-       return ret;
+       /* if dropped == false -> deliver to the interface */
+       return dropped;
 }
 
 /**
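
A caller-side sketch of the boolean contract documented above; the
receive-path wrapper and example_deliver() are hypothetical:

static void example_rx_arp_reply(struct batadv_priv *bat_priv,
				 struct sk_buff *skb, int hdr_size)
{
	/* true: DAT snooped the reply and consumed (freed) the skb */
	if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
		return;

	/* false: hand the skb on to the soft interface */
	example_deliver(bat_priv, skb);
}
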
index bb01586206289929f8c5e43153b75f9c279b9f85..6012e2b4af4f5d400385fe8c6f5ca00524933a65 100644 (file)
@@ -153,15 +153,11 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        struct batadv_neigh_node *router;
        struct batadv_neigh_ifinfo *router_ifinfo;
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
-       uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
-       uint32_t gw_divisor;
+       uint64_t max_gw_factor = 0, tmp_gw_factor = 0;
        uint8_t max_tq = 0;
        uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
-       gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
-       gw_divisor *= 64;
-
        rcu_read_lock();
        hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
@@ -187,7 +183,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                        tmp_gw_factor = tq_avg * tq_avg;
                        tmp_gw_factor *= gw_node->bandwidth_down;
                        tmp_gw_factor *= 100 * 100;
-                       tmp_gw_factor /= gw_divisor;
+                       tmp_gw_factor >>= 18;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
                            ((tmp_gw_factor == max_gw_factor) &&
@@ -439,6 +435,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
+       gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+       gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
        atomic_set(&gw_node->refcount, 1);
 
        spin_lock_bh(&bat_priv->gw.list_lock);
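
A note on the shift in the hunk above: assuming batman-adv's
BATADV_TQ_LOCAL_WINDOW_SIZE of 64, the removed divisor was
64 * 64 * 64 = 262144 = 2^18, so tmp_gw_factor >>= 18 performs the same
division. Widening the factors to uint64_t keeps the
tq_avg * tq_avg * bandwidth_down * 100 * 100 intermediate from overflowing
32 bits on fast gateways.
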
index 7aa480b7edd0d5fa56a7d88aa09c9db5da48068a..68a9554961eb41544c6da01b38cb65821b9e6ef5 100644 (file)
@@ -19,6 +19,8 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -588,19 +590,26 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
  *
  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
  * orig, has toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
                                             struct batadv_orig_node *orig,
                                             uint8_t mcast_flags)
 {
+       struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
+       struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+
        /* switched from flag unset to set */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
            !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
                atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
-                                  &bat_priv->mcast.want_all_unsnoopables_list);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(!hlist_unhashed(node));
+
+               hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag set to unset */
        } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
@@ -608,7 +617,10 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
                atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(hlist_unhashed(node));
+
+               hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
 }
@@ -621,19 +633,26 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
  *
  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          uint8_t mcast_flags)
 {
+       struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
+       struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+
        /* switched from flag unset to set */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
            !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
                atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
-                                  &bat_priv->mcast.want_all_ipv4_list);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(!hlist_unhashed(node));
+
+               hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag set to unset */
        } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
@@ -641,7 +660,10 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
                atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(hlist_unhashed(node));
+
+               hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
 }
@@ -654,19 +676,26 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
  *
  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig,
                                          uint8_t mcast_flags)
 {
+       struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
+       struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+
        /* switched from flag unset to set */
        if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
            !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
                atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
-                                  &bat_priv->mcast.want_all_ipv6_list);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(!hlist_unhashed(node));
+
+               hlist_add_head_rcu(node, head);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        /* switched from flag set to unset */
        } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
@@ -674,7 +703,10 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
                atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
 
                spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-               hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
+               /* flag checks above + mcast_handler_lock prevents this */
+               WARN_ON(hlist_unhashed(node));
+
+               hlist_del_init_rcu(node);
                spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
        }
 }
@@ -697,39 +729,42 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
        uint8_t mcast_flags = BATADV_NO_FLAGS;
        bool orig_initialized;
 
-       orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
+       if (orig_mcast_enabled && tvlv_value &&
+           (tvlv_value_len >= sizeof(mcast_flags)))
+               mcast_flags = *(uint8_t *)tvlv_value;
+
+       spin_lock_bh(&orig->mcast_handler_lock);
+       orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+                                   &orig->capa_initialized);
 
        /* If mcast support is turned on decrease the disabled mcast node
         * counter only if we had increased it for this node before. If this
         * is a completely new orig_node no need to decrease the counter.
         */
        if (orig_mcast_enabled &&
-           !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
+           !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
                if (orig_initialized)
                        atomic_dec(&bat_priv->mcast.num_disabled);
-               orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
+               set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
        /* If mcast support is being switched off or if this is an initial
         * OGM without mcast support then increase the disabled mcast
         * node counter.
         */
        } else if (!orig_mcast_enabled &&
-                  (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+                  (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
                    !orig_initialized)) {
                atomic_inc(&bat_priv->mcast.num_disabled);
-               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
+               clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
        }
 
-       orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
-
-       if (orig_mcast_enabled && tvlv_value &&
-           (tvlv_value_len >= sizeof(mcast_flags)))
-               mcast_flags = *(uint8_t *)tvlv_value;
+       set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
        batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
 
        orig->mcast_flags = mcast_flags;
+       spin_unlock_bh(&orig->mcast_handler_lock);
 }
 
 /**
@@ -763,11 +798,15 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 {
        struct batadv_priv *bat_priv = orig->bat_priv;
 
-       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
-           orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
+       spin_lock_bh(&orig->mcast_handler_lock);
+
+       if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
+           test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
                atomic_dec(&bat_priv->mcast.num_disabled);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
        batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+
+       spin_unlock_bh(&orig->mcast_handler_lock);
 }
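
The "Caller needs to hold orig->mcast_handler_lock" rule added above ties
every capability-flag read to the list updates it implies. A sketch of the
shape the TVLV handler and the purge path now share; the want_*_update()
helpers are the static functions from this file, while the wrapper itself
is illustrative:

static void example_update_mcast(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig,
				 uint8_t new_flags)
{
	spin_lock_bh(&orig->mcast_handler_lock);

	/* no concurrent toggling, so the hlist_unhashed() checks in
	 * the helpers cannot race
	 */
	batadv_mcast_want_unsnoop_update(bat_priv, orig, new_flags);
	batadv_mcast_want_ipv4_update(bat_priv, orig, new_flags);
	batadv_mcast_want_ipv6_update(bat_priv, orig, new_flags);
	orig->mcast_flags = new_flags;

	spin_unlock_bh(&orig->mcast_handler_lock);
}
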
index f0a50f31d822e8a9f90477249b0727a668f70a28..46604010dcd42aa83ee2a0801dbaf77005e03d63 100644 (file)
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
 #include <linux/debugfs.h>
@@ -134,9 +135,9 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                                          uint16_t tvlv_value_len)
 {
        if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+               clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
        else
-               orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+               set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
 }
 
 /**
@@ -894,7 +895,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
                goto out;
 
        /* check if orig node is network coding enabled */
-       if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+       if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
                goto out;
 
        /* accept ogms from 'good' neighbors and single hop neighbors */
index 018b7495ad844cdf4f97f1590455bc205cdf3d71..32a0fcfab36d918b9359633a326191745b409fb4 100644 (file)
@@ -696,8 +696,13 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
        orig_node->last_seen = jiffies;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
        orig_node->mcast_flags = BATADV_NO_FLAGS;
+       INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
+       INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
+       INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
+       spin_lock_init(&orig_node->mcast_handler_lock);
 #endif
 
        /* create a vlan object for the "untagged" LAN */
index 0a01992e65ab06b898e40a8097a29e5162c53b08..191076ef1eca1d0a6a2833d4437f925da1b9e71b 100644 (file)
@@ -616,7 +616,8 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
-                   (forw_packet->if_incoming != hard_iface))
+                   (forw_packet->if_incoming != hard_iface) &&
+                   (forw_packet->if_outgoing != hard_iface))
                        continue;
 
                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
index c002961da75d655deb813990f5706cf37fbd6d7d..49d3d3aa59cba2777ed1697f5b5bd7cbc8ffd217 100644 (file)
@@ -202,6 +202,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        int gw_mode;
        enum batadv_forw_mode forw_mode;
        struct batadv_orig_node *mcast_single_orig = NULL;
+       int network_offset = ETH_HLEN;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
@@ -214,14 +215,18 @@ static int batadv_interface_tx(struct sk_buff *skb,
        case ETH_P_8021Q:
                vhdr = vlan_eth_hdr(skb);
 
-               if (vhdr->h_vlan_encapsulated_proto != ethertype)
+               if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+                       network_offset += VLAN_HLEN;
                        break;
+               }
 
                /* fall through */
        case ETH_P_BATMAN:
                goto dropped;
        }
 
+       skb_set_network_header(skb, network_offset);
+
        if (batadv_bla_tx(bat_priv, skb, vid))
                goto dropped;
 
@@ -479,6 +484,9 @@ out:
  */
 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
+       if (!vlan)
+               return;
+
        if (atomic_dec_and_test(&vlan->refcount)) {
                spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
                hlist_del_rcu(&vlan->list);
@@ -933,7 +941,7 @@ static void batadv_softif_init_early(struct net_device *dev)
        dev->netdev_ops = &batadv_netdev_ops;
        dev->destructor = batadv_softif_free;
        dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        /* can't call min_mtu, because the needed variables
         * have not been initialized yet
index b4824951010ba6b42bf7d7c7eb62c529ed340158..c1eb7b72ab15fb3a09bf3dc236dc7c83043d51d9 100644 (file)
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
@@ -594,6 +595,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+                addr, BATADV_PRINT_VID(vid))) {
+               kfree(tt_local);
+               tt_local = NULL;
+               goto out;
+       }
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1041,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
        struct batadv_softif_vlan *vlan;
+       void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1061,11 +1069,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
         * immediately purge it
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
-       hlist_del_rcu(&tt_local_entry->common.hash_entry);
+
+       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_local_entry->common);
+       if (!tt_entry_exists)
+               goto out;
+
+       /* extra call to free the local tt entry */
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
        /* decrease the reference held for this vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               goto out;
+
        batadv_softif_vlan_free_ref(vlan);
        batadv_softif_vlan_free_ref(vlan);
 
@@ -1166,8 +1185,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        /* decrease the reference held for this vlan */
                        vlan = batadv_softif_vlan_get(bat_priv,
                                                      tt_common_entry->vid);
-                       batadv_softif_vlan_free_ref(vlan);
-                       batadv_softif_vlan_free_ref(vlan);
+                       if (vlan) {
+                               batadv_softif_vlan_free_ref(vlan);
+                               batadv_softif_vlan_free_ref(vlan);
+                       }
 
                        batadv_tt_local_entry_free_ref(tt_local);
                }
@@ -1862,7 +1883,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                }
                spin_unlock_bh(list_lock);
        }
-       orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
+       clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
@@ -2195,7 +2216,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
        spin_lock_bh(&bat_priv->tt.req_list_lock);
 
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-               list_del(&node->list);
+               list_del_init(&node->list);
                kfree(node);
        }
 
@@ -2231,7 +2252,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
-                       list_del(&node->list);
+                       list_del_init(&node->list);
                        kfree(node);
                }
        }
@@ -2513,7 +2534,8 @@ out:
                batadv_hardif_free_ref(primary_if);
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
-               list_del(&tt_req_node->list);
+               /* list_del_init() verifies tt_req_node still is in the list */
+               list_del_init(&tt_req_node->list);
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
@@ -2821,7 +2843,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                return;
                }
        }
-       orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
+       set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
@@ -2950,7 +2972,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
-               list_del(&node->list);
+               list_del_init(&node->list);
                kfree(node);
        }
 
@@ -3207,8 +3229,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 
                        /* decrease the reference held for this vlan */
                        vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       batadv_softif_vlan_free_ref(vlan);
-                       batadv_softif_vlan_free_ref(vlan);
+                       if (vlan) {
+                               batadv_softif_vlan_free_ref(vlan);
+                               batadv_softif_vlan_free_ref(vlan);
+                       }
 
                        batadv_tt_local_entry_free_ref(tt_local);
                }
@@ -3321,7 +3345,8 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
        bool has_tt_init;
 
        tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
-       has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
+       has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
+                              &orig_node->capa_initialized);
 
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes
index 67d63483618eba4791ee4fffb5654e9c85d82482..55610a805b533bd227332d88a4b5f4b29c5deebd 100644 (file)
@@ -221,6 +221,7 @@ struct batadv_orig_bat_iv {
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
  * @mcast_flags: multicast flags announced by the orig node
  * @mcast_want_all_unsnoop_node: a list node for the
  *  mcast.want_all_unsnoopables list
@@ -268,13 +269,15 @@ struct batadv_orig_node {
        unsigned long last_seen;
        unsigned long bcast_seqno_reset;
 #ifdef CONFIG_BATMAN_ADV_MCAST
+       /* synchronizes mcast tvlv specific orig changes */
+       spinlock_t mcast_handler_lock;
        uint8_t mcast_flags;
        struct hlist_node mcast_want_all_unsnoopables_node;
        struct hlist_node mcast_want_all_ipv4_node;
        struct hlist_node mcast_want_all_ipv6_node;
 #endif
-       uint8_t capabilities;
-       uint8_t capa_initialized;
+       unsigned long capabilities;
+       unsigned long capa_initialized;
        atomic_t last_ttvn;
        unsigned char *tt_buff;
        int16_t tt_buff_len;
@@ -313,10 +316,10 @@ struct batadv_orig_node {
  *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
-       BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
-       BATADV_ORIG_CAPA_HAS_NC = BIT(1),
-       BATADV_ORIG_CAPA_HAS_TT = BIT(2),
-       BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
+       BATADV_ORIG_CAPA_HAS_DAT,
+       BATADV_ORIG_CAPA_HAS_NC,
+       BATADV_ORIG_CAPA_HAS_TT,
+       BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
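
The enum change above turns the capability values from bitmasks (BIT(n))
into plain bit numbers, which is what the atomic bitops used in the earlier
hunks expect. A before/after sketch of the access pattern;
handle_dat_capable_node() is hypothetical:

/* before: read-modify-write on a uint8_t, not atomic */
orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;		/* was BIT(0) */
orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;

/* after: atomic bitops on an unsigned long, indexed by bit number */
set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
if (test_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities))
	handle_dat_capable_node(orig);
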
index 0ffe2e24020aa86b80115221811f324511cc1385..131e79cde3504f161e38f7bd5797f2de06eb63d1 100644 (file)
@@ -85,7 +85,7 @@ struct lowpan_dev {
 
 static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
 {
-       return netdev_priv(netdev);
+       return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
 }
 
 static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
@@ -848,8 +848,9 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        struct net_device *netdev;
        int err = 0;
 
-       netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
-                             NET_NAME_UNKNOWN, netdev_setup);
+       netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+                             IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
+                             netdev_setup);
        if (!netdev)
                return -ENOMEM;
 
@@ -859,7 +860,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-       *dev = netdev_priv(netdev);
+       *dev = lowpan_dev(netdev);
        (*dev)->netdev = netdev;
        (*dev)->hdev = chan->conn->hcon->hdev;
        INIT_LIST_HEAD(&(*dev)->peers);
@@ -869,6 +870,8 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
        spin_unlock(&devices_lock);
 
+       lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
+
        err = register_netdev(netdev);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
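
Taken together with the net/6lowpan/core.c hunks earlier, the allocation
pattern for a 6LoWPAN link layer now looks roughly like this; a sketch of
the steps in setup_netdev() above, error paths trimmed:

struct net_device *netdev;
struct lowpan_dev *dev;

/* reserve the generic lowpan header in front of the driver data */
netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
		      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, netdev_setup);
if (!netdev)
	return -ENOMEM;

/* record the link-layer type in the shared lowpan_priv header */
lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);

/* driver data now lives behind the header, not at netdev_priv() */
dev = (struct lowpan_dev *)lowpan_priv(netdev)->priv;
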
index 238ddd3cf95fb660d41f751821a09550f977f067..e32f34189007967e7674a501e2c944029623cfab 100644 (file)
@@ -379,7 +379,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
        amp_ctrl_put(ctrl);
 
        hci_req_init(&req, hdev);
-       hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
        hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
        kfree(cp);
index 2c48bf0b5afbd00d4b0308e8bb74a92fc9b72a7a..b4548c739a6475446d643bd5b01ab8627ef1f08e 100644 (file)
@@ -64,6 +64,48 @@ static void hci_le_create_connection_cancel(struct hci_conn *conn)
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
 }
 
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+{
+       struct hci_conn_params *params;
+       struct smp_irk *irk;
+       bdaddr_t *bdaddr;
+       u8 bdaddr_type;
+
+       bdaddr = &conn->dst;
+       bdaddr_type = conn->dst_type;
+
+       /* Check if we need to convert to identity address */
+       irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+       if (irk) {
+               bdaddr = &irk->bdaddr;
+               bdaddr_type = irk->addr_type;
+       }
+
+       params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
+       if (!params)
+               return;
+
+       /* The connection attempt was doing a scan for a new RPA, and is
+        * in the scan phase. If the params are not associated with any other
+        * autoconnect action, remove them completely. If they are, just
+        * unmark them as waiting for a connection by clearing the
+        * explicit_connect field.
+        */
+       if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+               hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+       else
+               params->explicit_connect = false;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+       hci_connect_le_scan_cleanup(conn);
+
+       hci_conn_hash_del(conn->hdev, conn);
+       hci_update_background_scan(conn->hdev);
+}
+
 static void hci_acl_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
@@ -340,8 +382,12 @@ static void hci_conn_timeout(struct work_struct *work)
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_create_connection_cancel(conn);
-                       else if (conn->type == LE_LINK)
-                               hci_le_create_connection_cancel(conn);
+                       else if (conn->type == LE_LINK) {
+                               if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+                                       hci_connect_le_scan_remove(conn);
+                               else
+                                       hci_le_create_connection_cancel(conn);
+                       }
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        hci_reject_sco(conn);
                }
@@ -637,15 +683,18 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        struct hci_conn *conn;
 
-       if (status == 0)
-               return;
+       hci_dev_lock(hdev);
+
+       conn = hci_lookup_le_connect(hdev);
+
+       if (!status) {
+               hci_connect_le_scan_cleanup(conn);
+               goto done;
+       }
 
        BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
               status);
 
-       hci_dev_lock(hdev);
-
-       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (!conn)
                goto done;
 
@@ -685,6 +734,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 
        conn->state = BT_CONNECT;
+       clear_bit(HCI_CONN_SCANNING, &conn->flags);
 }
 
 static void hci_req_directed_advertising(struct hci_request *req,
@@ -728,7 +778,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 role)
 {
        struct hci_conn_params *params;
-       struct hci_conn *conn;
+       struct hci_conn *conn, *conn_unfinished;
        struct smp_irk *irk;
        struct hci_request req;
        int err;
@@ -751,26 +801,29 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
         * and return the object found.
         */
        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       conn_unfinished = NULL;
        if (conn) {
-               conn->pending_sec_level = sec_level;
-               goto done;
+               if (conn->state == BT_CONNECT &&
+                   test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+                       BT_DBG("will continue unfinished conn %pMR", dst);
+                       conn_unfinished = conn;
+               } else {
+                       if (conn->pending_sec_level < sec_level)
+                               conn->pending_sec_level = sec_level;
+                       goto done;
+               }
        }
 
        /* Since the controller supports only one LE connection attempt at a
         * time, we return -EBUSY if there is any connection attempt running.
         */
-       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-       if (conn)
+       if (hci_lookup_le_connect(hdev))
                return ERR_PTR(-EBUSY);
 
        /* When given an identity address with existing identity
         * resolving key, the connection needs to be established
         * to a resolvable random address.
         *
-        * This uses the cached random resolvable address from
-        * a previous scan. When no cached address is available,
-        * try connecting to the identity address instead.
-        *
         * Storing the resolvable random address is required here
         * to handle connection failures. The address will later
         * be resolved back into the original identity address
@@ -782,15 +835,23 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                dst_type = ADDR_LE_DEV_RANDOM;
        }
 
-       conn = hci_conn_add(hdev, LE_LINK, dst, role);
+       if (conn_unfinished) {
+               conn = conn_unfinished;
+               bacpy(&conn->dst, dst);
+       } else {
+               conn = hci_conn_add(hdev, LE_LINK, dst, role);
+       }
+
        if (!conn)
                return ERR_PTR(-ENOMEM);
 
        conn->dst_type = dst_type;
        conn->sec_level = BT_SECURITY_LOW;
-       conn->pending_sec_level = sec_level;
        conn->conn_timeout = conn_timeout;
 
+       if (!conn_unfinished)
+               conn->pending_sec_level = sec_level;
+
        hci_req_init(&req, hdev);
 
        /* Disable advertising if we're active. For master role
@@ -854,6 +915,144 @@ create_conn:
                return ERR_PTR(err);
        }
 
+done:
+       /* If this is a continuation of a connect started by
+        * hci_connect_le_scan, it already called hci_conn_hold and calling
+        * it again would mess up the counter.
+        */
+       if (!conn_unfinished)
+               hci_conn_hold(conn);
+
+       return conn;
+}
+
+static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
+                                        u16 opcode)
+{
+       struct hci_conn *conn;
+
+       if (!status)
+               return;
+
+       BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
+              status);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (conn)
+               hci_le_conn_failed(conn, status);
+
+       hci_dev_unlock(hdev);
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+       struct hci_conn *conn;
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       if (!conn)
+               return false;
+
+       if (conn->dst_type != type)
+               return false;
+
+       if (conn->state != BT_CONNECTED)
+               return false;
+
+       return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_explicit_conn_params_set(struct hci_request *req,
+                                       bdaddr_t *addr, u8 addr_type)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_conn_params *params;
+
+       if (is_connected(hdev, addr, addr_type))
+               return -EISCONN;
+
+       params = hci_conn_params_add(hdev, addr, addr_type);
+       if (!params)
+               return -EIO;
+
+       /* If we created new params, or existing params were marked as disabled,
+        * mark them to be used just once to connect.
+        */
+       if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+               params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+               list_del_init(&params->action);
+               list_add(&params->action, &hdev->pend_le_conns);
+       }
+
+       params->explicit_connect = true;
+       __hci_update_background_scan(req);
+
+       BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+              params->auto_connect);
+
+       return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+                                    u8 dst_type, u8 sec_level,
+                                    u16 conn_timeout, u8 role)
+{
+       struct hci_conn *conn;
+       struct hci_request req;
+       int err;
+
+       /* Let's make sure that LE is enabled. */
+       if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+               if (lmp_le_capable(hdev))
+                       return ERR_PTR(-ECONNREFUSED);
+
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
+       /* Some devices send ATT messages as soon as the physical link is
+        * established. To be able to handle these ATT messages, the user-
+        * space first establishes the connection and then starts the pairing
+        * process.
+        *
+        * So if a hci_conn object already exists for the following connection
+        * attempt, we simply update pending_sec_level and auth_type fields
+        * and return the object found.
+        */
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       if (conn) {
+               if (conn->pending_sec_level < sec_level)
+                       conn->pending_sec_level = sec_level;
+               goto done;
+       }
+
+       BT_DBG("requesting refresh of dst_addr");
+
+       conn = hci_conn_add(hdev, LE_LINK, dst, role);
+       if (!conn)
+               return ERR_PTR(-ENOMEM);
+
+       hci_req_init(&req, hdev);
+
+       if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
+               return ERR_PTR(-EBUSY);
+
+       conn->state = BT_CONNECT;
+       set_bit(HCI_CONN_SCANNING, &conn->flags);
+
+       err = hci_req_run(&req, hci_connect_le_scan_complete);
+       if (err && err != -ENODATA) {
+               hci_conn_del(conn);
+               return ERR_PTR(err);
+       }
+
+       conn->dst_type = dst_type;
+       conn->sec_level = BT_SECURITY_LOW;
+       conn->pending_sec_level = sec_level;
+       conn->conn_timeout = conn_timeout;
+
 done:
        hci_conn_hold(conn);
        return conn;
index bc43b6490555c7d75dae8ac452a7b012c8f1ffa1..adcbc74c243268e8330bf760c39fa792a1c2a3a8 100644 (file)
@@ -2847,6 +2847,30 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
        return NULL;
 }
 
+/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+                                                   bdaddr_t *addr,
+                                                   u8 addr_type)
+{
+       struct hci_conn_params *param;
+
+       list_for_each_entry(param, &hdev->pend_le_conns, action) {
+               if (bacmp(&param->addr, addr) == 0 &&
+                   param->addr_type == addr_type &&
+                   param->explicit_connect)
+                       return param;
+       }
+
+       list_for_each_entry(param, &hdev->pend_le_reports, action) {
+               if (bacmp(&param->addr, addr) == 0 &&
+                   param->addr_type == addr_type &&
+                   param->explicit_connect)
+                       return param;
+       }
+
+       return NULL;
+}
+
 /* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
@@ -2916,6 +2940,15 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
                        continue;
+
+               /* If trying to establish a one-time connection to a disabled
+                * device, leave the params, but mark them as just once.
+                */
+               if (params->explicit_connect) {
+                       params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+                       continue;
+               }
+
                list_del(&params->list);
                kfree(params);
        }
index 218d7dfc342f484b0b9b18c4208a2ccc5efc0cb8..7ba35a9ba6b77db152db5931530d8d0c5dcec6b5 100644 (file)
@@ -1059,7 +1059,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
                hci_dev_set_flag(hdev, HCI_LE_ADV);
 
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
@@ -4447,7 +4447,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       conn = hci_lookup_le_connect(hdev);
        if (!conn) {
                conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
                if (!conn) {
@@ -4640,42 +4640,49 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
        /* If we're not connectable only connect devices that we have in
         * our pend_le_conns list.
         */
-       params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
-                                          addr, addr_type);
+       params = hci_explicit_connect_lookup(hdev, addr, addr_type);
+
        if (!params)
                return NULL;
 
-       switch (params->auto_connect) {
-       case HCI_AUTO_CONN_DIRECT:
-               /* Only devices advertising with ADV_DIRECT_IND are
-                * triggering a connection attempt. This is allowing
-                * incoming connections from slave devices.
-                */
-               if (adv_type != LE_ADV_DIRECT_IND)
+       if (!params->explicit_connect) {
+               switch (params->auto_connect) {
+               case HCI_AUTO_CONN_DIRECT:
+                       /* Only devices advertising with ADV_DIRECT_IND are
+                        * triggering a connection attempt. This is allowing
+                        * incoming connections from slave devices.
+                        */
+                       if (adv_type != LE_ADV_DIRECT_IND)
+                               return NULL;
+                       break;
+               case HCI_AUTO_CONN_ALWAYS:
+                       /* Devices advertising with ADV_IND or ADV_DIRECT_IND
+                        * are triggering a connection attempt. This means
+                        * that incoming connections from slave devices are
+                        * accepted and also outgoing connections to slave
+                        * devices are established when found.
+                        */
+                       break;
+               default:
                        return NULL;
-               break;
-       case HCI_AUTO_CONN_ALWAYS:
-               /* Devices advertising with ADV_IND or ADV_DIRECT_IND
-                * are triggering a connection attempt. This means
-                * that incoming connectioms from slave device are
-                * accepted and also outgoing connections to slave
-                * devices are established when found.
-                */
-               break;
-       default:
-               return NULL;
+               }
        }
 
        conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
                              HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
        if (!IS_ERR(conn)) {
-               /* Store the pointer since we don't really have any
+               /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+                * by the higher layer that tried to connect; if not, then
+                * store the pointer since we don't really have any
                 * other owner of the object besides the params that
                 * triggered it. This way we can abort the connection if
                 * the parameters get removed and keep the reference
                 * count consistent once the connection is established.
                 */
-               params->conn = hci_conn_get(conn);
+
+               if (!params->explicit_connect)
+                       params->conn = hci_conn_get(conn);
+
                return conn;
        }
 
index d6025d6e6d59f957c612a1e7ff455f770734eb6a..b7369220c9efff616d13f3ad3aeec44c809bd4e0 100644 (file)
@@ -317,7 +317,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
-           hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+           hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
@@ -479,7 +479,6 @@ void hci_update_page_scan(struct hci_dev *hdev)
 void __hci_update_background_scan(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
-       struct hci_conn *conn;
 
        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
@@ -529,8 +528,7 @@ void __hci_update_background_scan(struct hci_request *req)
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (conn)
+               if (hci_lookup_le_connect(hdev))
                        return;
 
                /* If controller is currently scanning, we stop it to ensure we
index 45fffa4136421b8dd5ab301cf0a4ec529b36ff8e..7c65ee200c29215c6b3f050cfbb881873be4946a 100644 (file)
@@ -7113,8 +7113,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                else
                        role = HCI_ROLE_MASTER;
 
-               hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
-                                     HCI_LE_CONN_TIMEOUT, role);
+               hcon = hci_connect_le_scan(hdev, dst, dst_type,
+                                          chan->sec_level,
+                                          HCI_LE_CONN_TIMEOUT,
+                                          role);
        } else {
                u8 auth_type = l2cap_get_auth_type(chan);
                hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
index 7ab191589541c8fab56d47ab9f0e21f050751de9..ccaf5a436d8f7a70799729a04ffc17583d11913f 100644 (file)
@@ -3564,9 +3564,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                 */
                hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
 
-               conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-                                     sec_level, HCI_LE_CONN_TIMEOUT,
-                                     HCI_ROLE_MASTER);
+               conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+                                          addr_type, sec_level,
+                                          HCI_LE_CONN_TIMEOUT,
+                                          HCI_ROLE_MASTER);
        }
 
        if (IS_ERR(conn)) {
@@ -4210,7 +4211,7 @@ static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
-               if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+               if (hci_lookup_le_connect(hdev)) {
                        *status = MGMT_STATUS_REJECTED;
                        return false;
                }
@@ -6107,6 +6108,12 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
        switch (auto_connect) {
        case HCI_AUTO_CONN_DISABLED:
        case HCI_AUTO_CONN_LINK_LOSS:
+               /* If auto connect is being disabled while we're trying to
+                * connect to a device, keep connecting.
+                */
+               if (params->explicit_connect)
+                       list_add(&params->action, &hdev->pend_le_conns);
+
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_REPORT:
@@ -7843,7 +7850,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        /* Make sure we copy only the significant bytes based on the
         * encryption key size, and set the rest of the value to zeroes.
         */
-       memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+       memcpy(ev.key.val, key->val, key->enc_size);
        memset(ev.key.val + key->enc_size, 0,
               sizeof(ev.key.val) - key->enc_size);
 
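
The one-liner above fixes a classic sizeof misuse: sizeof(key->enc_size) is the size of the enc_size field itself (1 for a u8), not the key length, so only a single key byte was ever copied before zero-padding. A minimal standalone illustration of the pitfall, using a hypothetical struct:

    #include <stdio.h>
    #include <string.h>

    struct ltk {
            unsigned char val[16];
            unsigned char enc_size;   /* significant key bytes, e.g. 7..16 */
    };

    int main(void)
    {
            struct ltk key = { .val = { 0xaa }, .enc_size = 16 };
            unsigned char out[16];

            printf("%zu\n", sizeof(key.enc_size)); /* prints 1 -- the bug */

            /* The fix: copy enc_size bytes, zero-pad the remainder. */
            memcpy(out, key.val, key.enc_size);
            memset(out + key.enc_size, 0, sizeof(out) - key.enc_size);
            return 0;
    }
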
index 0aa8f5cf46a17171c627e6949c51e684f28a58ed..6ed2feb51e3c7ae3d0f0fd629d6e147549b5fa54 100644 (file)
@@ -365,8 +365,7 @@ void br_dev_setup(struct net_device *dev)
        dev->destructor = br_dev_free;
        dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
-       dev->tx_queue_len = 0;
-       dev->priv_flags = IFF_EBRIDGE;
+       dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
 
        dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
index 0752796fe0ba4443036a94a385ef4d1666cd3adc..66efdc21f548524a19f3abc3ea5268b245f88dd2 100644 (file)
@@ -1608,7 +1608,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
@@ -1653,7 +1653,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
index 91a2e08c2bb84546fb2ab7ac2bd7974e2dcb999a..dbcb1949ea58c2a4ca26135134367b90eb55f8e7 100644 (file)
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
                + nla_total_size(1)     /* IFLA_BRPORT_LEARNING */
                + nla_total_size(1)     /* IFLA_BRPORT_UNICAST_FLOOD */
+               + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP */
+               + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP_WIFI */
                + 0;
 }
 
@@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROXYARP]  = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -724,6 +728,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
        [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
        [IFLA_BR_STP_STATE] = { .type = NLA_U32 },
        [IFLA_BR_PRIORITY] = { .type = NLA_U16 },
+       [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -771,6 +776,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
                br_stp_set_bridge_priority(br, priority);
        }
 
+       if (data[IFLA_BR_VLAN_FILTERING]) {
+               u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
+
+               err = __br_vlan_filter_toggle(br, vlan_filter);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
@@ -782,6 +795,7 @@ static size_t br_get_size(const struct net_device *brdev)
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
               nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
               nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
+              nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
               0;
 }
 
@@ -794,13 +808,15 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
        u32 stp_enabled = br->stp_enabled;
        u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
+       u8 vlan_enabled = br_vlan_enabled(br);
 
        if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
            nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
            nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
            nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
            nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
-           nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
+           nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
+           nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
                return -EMSGSIZE;
 
        return 0;
@@ -833,7 +849,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind                   = "bridge",
        .priv_size              = sizeof(struct net_bridge),
        .setup                  = br_dev_setup,
-       .maxtype                = IFLA_BRPORT_MAX,
+       .maxtype                = IFLA_BR_MAX,
        .policy                 = br_policy,
        .validate               = br_validate,
        .newlink                = br_dev_newlink,
index e2cb359f9dd3279be534cb24d9f0a28cbcc3bf47..3d95647039d0f75eec0844454fea9356a5de7e02 100644 (file)
@@ -614,6 +614,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid);
 void br_vlan_flush(struct net_bridge *br);
 bool br_vlan_find(struct net_bridge *br, u16 vid);
 void br_recalculate_fwd_mask(struct net_bridge *br);
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
@@ -771,6 +772,12 @@ static inline int br_vlan_enabled(struct net_bridge *br)
 {
        return 0;
 }
+
+static inline int __br_vlan_filter_toggle(struct net_bridge *br,
+                                         unsigned long val)
+{
+       return -EOPNOTSUPP;
+}
 #endif
 
 struct nf_br_ops {
index 0d41f81838ff95f8d6ff34246abda0ac3df1f5a2..3cef6892c0bbdb03b0807e3fa48711b44e676020 100644 (file)
@@ -468,21 +468,27 @@ void br_recalculate_fwd_mask(struct net_bridge *br)
                                              ~(1u << br->group_addr[5]);
 }
 
-int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
-       if (!rtnl_trylock())
-               return restart_syscall();
-
        if (br->vlan_enabled == val)
-               goto unlock;
+               return 0;
 
        br->vlan_enabled = val;
        br_manage_promisc(br);
        recalculate_group_addr(br);
        br_recalculate_fwd_mask(br);
 
-unlock:
+       return 0;
+}
+
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+{
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       __br_vlan_filter_toggle(br, val);
        rtnl_unlock();
+
        return 0;
 }
 
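
The split above follows a common kernel pattern: a double-underscore helper that assumes the caller already holds the lock (here rtnl, for the netlink path in br_changelink()), plus a thin locking wrapper for everyone else. A hedged sketch of the pattern using pthreads and hypothetical names:

    #include <pthread.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
    static int vlan_enabled;

    /* Lock-free core: caller must already hold cfg_lock. */
    static int __vlan_toggle(int val)
    {
            if (vlan_enabled == val)
                    return 0;
            vlan_enabled = val;
            /* ...recompute promiscuity, group address, forward mask... */
            return 0;
    }

    /* Locking wrapper for callers that do not hold cfg_lock. */
    int vlan_toggle(int val)
    {
            int err;

            pthread_mutex_lock(&cfg_lock);
            err = __vlan_toggle(val);
            pthread_mutex_unlock(&cfg_lock);
            return err;
    }
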
index edbca468fa73cc29b31703bd4fe4d70925f21bd5..d730a0f68f46b43b3e8dd51cb3bb029f04cde93a 100644 (file)
@@ -177,7 +177,7 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
        skb->protocol = htons(ETH_P_CAIF);
 
        /* Check if we need to handle xoff */
-       if (likely(caifd->netdev->tx_queue_len == 0))
+       if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
                goto noxoff;
 
        if (unlikely(caifd->xoff))
index 4967262b27076af66347d20eca54b65a2e61d789..617088aee21d41ba98d4ef5ebee5d6c002efe029 100644 (file)
@@ -131,12 +131,12 @@ out_noerr:
        goto out;
 }
 
-static int skb_set_peeked(struct sk_buff *skb)
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
 {
        struct sk_buff *nskb;
 
        if (skb->peeked)
-               return 0;
+               return skb;
 
        /* We have to unshare an skb before modifying it. */
        if (!skb_shared(skb))
@@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        skb->prev->next = nskb;
        skb->next->prev = nskb;
@@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 done:
        skb->peeked = 1;
 
-       return 0;
+       return skb;
 }
 
 /**
@@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                        continue;
                                }
 
-                               error = skb_set_peeked(skb);
-                               if (error)
+                               skb = skb_set_peeked(skb);
+                               error = PTR_ERR(skb);
+                               if (IS_ERR(skb))
                                        goto unlock_err;
 
                                atomic_inc(&skb->users);
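
Changing skb_set_peeked() to return the skb lets the caller transparently switch to the unshared clone, while errors travel inside the pointer via the kernel's ERR_PTR()/IS_ERR() idiom. A simplified, self-contained sketch of that idiom (the helpers below are cut-down stand-ins for <linux/err.h>):

    #include <errno.h>

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-4095;
    }

    struct buf { int peeked; };

    static struct buf *set_peeked(struct buf *b)
    {
            if (b->peeked)
                    return b;              /* already done: hand back b */
            /* A clone could replace b here; on allocation failure: */
            return ERR_PTR(-ENOMEM);       /* errno encoded in the pointer */
    }
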
index 4870c3556a5a68be94cf28b65d527331810e7187..b1f3f4844e60c21248f97ef4c2a3c0b0dddc1bca 100644 (file)
@@ -6997,6 +6997,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);
 
+       if (!dev->tx_queue_len)
+               printk(KERN_WARNING "%s uses DEPRECATED zero tx_queue_len - convert driver to use IFF_NO_QUEUE instead.\n", name);
+
        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
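
With alloc_netdev_mqs() now warning on a zero tx_queue_len, virtual drivers are expected to declare themselves queueless via a priv flag instead, as the bridge, DSA, HSR and 6lowpan conversions elsewhere in this commit do. A hedged sketch of the conversion in a hypothetical driver setup callback:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static void example_setup(struct net_device *dev)
    {
            ether_setup(dev);

            /* Old, now-deprecated way of skipping the qdisc queue:
             *         dev->tx_queue_len = 0;
             * New way: keep the default tx_queue_len and set the flag.
             */
            dev->priv_flags |= IFF_NO_QUEUE;
    }
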
index f8694d1b8702e70db45a0eee94b5361ecb4214e0..50dcdbb0ee46edc40c4fc105dc91bc8dc55492b8 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/net_namespace.h>
 #include <linux/sched.h>
 #include <linux/prefetch.h>
+#include <net/lwtunnel.h>
 
 #include <net/dst.h>
 #include <net/dst_metadata.h>
@@ -184,6 +185,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 #ifdef CONFIG_IP_ROUTE_CLASSID
        dst->tclassid = 0;
 #endif
+       dst->lwtstate = NULL;
        atomic_set(&dst->__refcnt, initial_ref);
        dst->__use = 0;
        dst->lastuse = jiffies;
@@ -264,6 +266,7 @@ again:
                kfree(dst);
        else
                kmem_cache_free(dst->ops->kmem_cachep, dst);
+       lwtstate_put(dst->lwtstate);
 
        dst = child;
        if (dst) {
index a50dbfa83ad9c4459dde60bc07908170f607d3c9..b4adc961413ffa7b3b139f156ccec88c21ac7954 100644 (file)
@@ -1124,6 +1124,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
        *pfp = fp;
        return 0;
 }
+EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
 
 void bpf_prog_destroy(struct bpf_prog *fp)
 {
@@ -1348,7 +1349,7 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
 static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
-       u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+       bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
        int offset = (int) r2;
        __sum16 sum, *ptr;
 
@@ -1488,13 +1489,13 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
-       struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
        if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
                return -EINVAL;
 
        to->tunnel_id = be64_to_cpu(info->key.tun_id);
-       to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+       to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
 
        return 0;
 }
@@ -1528,7 +1529,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        info = &md->u.tun_info;
        info->mode = IP_TUNNEL_INFO_TX;
        info->key.tun_id = cpu_to_be64(from->tunnel_id);
-       info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+       info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
 
        return 0;
 }
index 5d6d8e3d450aeeaabe50608feba3156b9c9b9963..dfb1a9ca08354fef353bf274415528d2d4ca269a 100644 (file)
@@ -72,7 +72,8 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
 EXPORT_SYMBOL(lwtunnel_encap_del_ops);
 
 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
-                        struct nlattr *encap, struct lwtunnel_state **lws)
+                        struct nlattr *encap, unsigned int family,
+                        const void *cfg, struct lwtunnel_state **lws)
 {
        const struct lwtunnel_encap_ops *ops;
        int ret = -EINVAL;
@@ -85,7 +86,7 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[encap_type]);
        if (likely(ops && ops->build_state))
-               ret = ops->build_state(dev, encap, lws);
+               ret = ops->build_state(dev, encap, family, cfg, lws);
        rcu_read_unlock();
 
        return ret;
@@ -179,14 +180,16 @@ int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
 }
 EXPORT_SYMBOL(lwtunnel_cmp_encap);
 
-int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
-                     struct lwtunnel_state *lwtstate)
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
 {
+       struct dst_entry *dst = skb_dst(skb);
        const struct lwtunnel_encap_ops *ops;
+       struct lwtunnel_state *lwtstate;
        int ret = -EINVAL;
 
-       if (!lwtstate)
+       if (!dst)
                goto drop;
+       lwtstate = dst->lwtstate;
 
        if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
            lwtstate->type > LWTUNNEL_ENCAP_MAX)
@@ -209,35 +212,38 @@ drop:
 
        return ret;
 }
+EXPORT_SYMBOL(lwtunnel_output);
 
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+int lwtunnel_input(struct sk_buff *skb)
 {
-       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-       struct lwtunnel_state *lwtstate = NULL;
+       struct dst_entry *dst = skb_dst(skb);
+       const struct lwtunnel_encap_ops *ops;
+       struct lwtunnel_state *lwtstate;
+       int ret = -EINVAL;
 
-       if (rt) {
-               lwtstate = rt->rt6i_lwtstate;
-               skb->dev = rt->dst.dev;
-       }
+       if (!dst)
+               goto drop;
+       lwtstate = dst->lwtstate;
 
-       skb->protocol = htons(ETH_P_IPV6);
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
 
-       return __lwtunnel_output(sk, skb, lwtstate);
-}
-EXPORT_SYMBOL(lwtunnel_output6);
+       ret = -EOPNOTSUPP;
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->input))
+               ret = ops->input(skb);
+       rcu_read_unlock();
 
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
-{
-       struct rtable *rt = (struct rtable *)skb_dst(skb);
-       struct lwtunnel_state *lwtstate = NULL;
+       if (ret == -EOPNOTSUPP)
+               goto drop;
 
-       if (rt) {
-               lwtstate = rt->rt_lwtstate;
-               skb->dev = rt->dst.dev;
-       }
+       return ret;
 
-       skb->protocol = htons(ETH_P_IP);
+drop:
+       kfree_skb(skb);
 
-       return __lwtunnel_output(sk, skb, lwtstate);
+       return ret;
 }
-EXPORT_SYMBOL(lwtunnel_output);
+EXPORT_SYMBOL(lwtunnel_input);
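
After this change both paths share the same shape: fetch lwtstate from the skb's dst entry and dispatch through an RCU-protected per-encap-type ops table. A hedged, generic sketch of that dispatch pattern (table size and names are hypothetical):

    #include <linux/errno.h>
    #include <linux/rcupdate.h>

    struct encap_ops { int (*input)(void *pkt); };

    #define ENCAP_MAX 8
    static struct encap_ops __rcu *encaps[ENCAP_MAX];

    static int encap_input(unsigned int type, void *pkt)
    {
            const struct encap_ops *ops;
            int ret = -EOPNOTSUPP;

            rcu_read_lock();
            /* Entries are published with rcu_assign_pointer(). */
            ops = rcu_dereference(encaps[type]);
            if (ops && ops->input)
                    ret = ops->input(pkt);
            rcu_read_unlock();

            return ret;
    }
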
index 84195dacb8b63f418cac67d4039842dd72eaecc4..2b515ba7e94f4d1a15a3021a6e4a7732af1026c0 100644 (file)
@@ -274,8 +274,12 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
-                   entries >= tbl->gc_thresh3)
+                   entries >= tbl->gc_thresh3) {
+                       net_info_ratelimited("%s: neighbor table overflow!\n",
+                                            tbl->id);
+                       NEIGH_CACHE_STAT_INC(tbl, table_fulls);
                        goto out_entries;
+               }
        }
 
        n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
@@ -1849,6 +1853,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        ndst.ndts_rcv_probes_ucast      += st->rcv_probes_ucast;
                        ndst.ndts_periodic_gc_runs      += st->periodic_gc_runs;
                        ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
+                       ndst.ndts_table_fulls           += st->table_fulls;
                }
 
                if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
@@ -2717,12 +2722,12 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
        struct neigh_statistics *st = v;
 
        if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
+               seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
                return 0;
        }
 
        seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
-                       "%08lx %08lx  %08lx %08lx %08lx\n",
+                       "%08lx %08lx  %08lx %08lx %08lx %08lx\n",
                   atomic_read(&tbl->entries),
 
                   st->allocs,
@@ -2739,7 +2744,8 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
 
                   st->periodic_gc_runs,
                   st->forced_gc_runs,
-                  st->unres_discards
+                  st->unres_discards,
+                  st->table_fulls
                   );
 
        return 0;
index 194c1d03b2b3b1e78254fb0108682e4dfa3ab776..b279077c30894dfeb69e2c21686d702cc809e678 100644 (file)
@@ -689,7 +689,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
-       static DEFINE_SPINLOCK(rps_map_lock);
+       static DEFINE_MUTEX(rps_map_mutex);
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -722,18 +722,21 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                map = NULL;
        }
 
-       spin_lock(&rps_map_lock);
+       mutex_lock(&rps_map_mutex);
        old_map = rcu_dereference_protected(queue->rps_map,
-                                           lockdep_is_held(&rps_map_lock));
+                                           mutex_is_locked(&rps_map_mutex));
        rcu_assign_pointer(queue->rps_map, map);
-       spin_unlock(&rps_map_lock);
 
        if (map)
                static_key_slow_inc(&rps_needed);
-       if (old_map) {
-               kfree_rcu(old_map, rcu);
+       if (old_map)
                static_key_slow_dec(&rps_needed);
-       }
+
+       mutex_unlock(&rps_map_mutex);
+
+       if (old_map)
+               kfree_rcu(old_map, rcu);
+
        free_cpumask_var(mask);
        return len;
 }
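
The conversion above is needed because static_key_slow_inc()/dec() can sleep, which is illegal under a spinlock; a mutex permits it, and the kfree_rcu() is pushed past the unlock so no freeing work happens with the lock held. A hedged sketch of the resulting publish-then-free shape, with a hypothetical map type:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct map { struct rcu_head rcu; /* ...payload... */ };

    static DEFINE_MUTEX(map_mutex);

    /* Swap in the new map under the mutex; free the old one only
     * after the mutex is dropped.
     */
    static void publish_map(struct map __rcu **slot, struct map *new)
    {
            struct map *old;

            mutex_lock(&map_mutex);
            old = rcu_dereference_protected(*slot,
                                            mutex_is_locked(&map_mutex));
            rcu_assign_pointer(*slot, new);
            mutex_unlock(&map_mutex);

            if (old)
                    kfree_rcu(old, rcu);
    }
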
index 0e0fb30cbc04084c96d6ae8ceec94a2e493a3bbf..de8d5cc5eb240555e2c8b30da7858b3ff9eadffb 100644 (file)
@@ -3513,8 +3513,6 @@ static int pktgen_thread_worker(void *arg)
 
        set_freezable();
 
-       __set_current_state(TASK_RUNNING);
-
        while (!kthread_should_stop()) {
                pkt_dev = next_to_run(t);
 
@@ -3559,7 +3557,6 @@ static int pktgen_thread_worker(void *arg)
 
                try_to_freeze();
        }
-       set_current_state(TASK_INTERRUPTIBLE);
 
        pr_debug("%s stopping all device\n", t->tsk->comm);
        pktgen_stop(t);
index 87b22c0bc08c2f33fa31948b8b2604f48b8009bc..b42f0e26f89e4cf2e37a8329da549eb5cd1200c5 100644 (file)
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                        spin_lock_bh(&queue->syn_wait_lock);
                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
+                               /* Because of the following del_timer_sync(),
+                                * we must release the spinlock here
+                                * or risk a deadlock.
+                                */
+                               spin_unlock_bh(&queue->syn_wait_lock);
                                atomic_inc(&lopt->qlen_dec);
-                               if (del_timer(&req->rsk_timer))
+                               if (del_timer_sync(&req->rsk_timer))
                                        reqsk_put(req);
                                reqsk_put(req);
+                               spin_lock_bh(&queue->syn_wait_lock);
                        }
                        spin_unlock_bh(&queue->syn_wait_lock);
                }
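
del_timer_sync() waits for a concurrently running timer callback to finish; if that callback can take syn_wait_lock itself, calling it with the lock held deadlocks, hence the drop-and-reacquire above. A hedged, generic sketch of the pattern with hypothetical types:

    #include <linux/spinlock.h>
    #include <linux/timer.h>

    struct req { struct req *next; struct timer_list timer; };
    struct rqueue { spinlock_t lock; struct req *head; };

    void put_req(struct req *req);   /* hypothetical refcount drop */

    static void destroy_queue(struct rqueue *q)
    {
            struct req *req;

            spin_lock_bh(&q->lock);
            while ((req = q->head) != NULL) {
                    q->head = req->next;
                    /* The timer callback may take q->lock itself, so
                     * release it before waiting for the callback.
                     */
                    spin_unlock_bh(&q->lock);
                    if (del_timer_sync(&req->timer))
                            put_req(req);        /* timer's reference */
                    put_req(req);                /* queue's reference */
                    spin_lock_bh(&q->lock);
            }
            spin_unlock_bh(&q->lock);
    }
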
index b6a19ca0f99e49c7406f1fedb6c59433bbd7fd38..bf9a5d93c2d10bbb9c0bfc82d5e7c5df82ef2b5c 100644 (file)
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
                                               unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
        unsigned int len = skb_transport_offset(skb) + transport_len;
        int ret;
 
-       if (skb->len < len) {
-               kfree_skb(skb);
+       if (skb->len < len)
                return NULL;
-       } else if (skb->len == len) {
+       else if (skb->len == len)
                return skb;
-       }
 
        skb_chk = skb_clone(skb, GFP_ATOMIC);
-       kfree_skb(skb);
-
        if (!skb_chk)
                return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
                                     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
        skb_chk = skb_checksum_maybe_trim(skb, transport_len);
        if (!skb_chk)
-               return NULL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, offset)) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (!pskb_may_pull(skb_chk, offset))
+               goto err;
 
        __skb_pull(skb_chk, offset);
        ret = skb_chkf(skb_chk);
        __skb_push(skb_chk, offset);
 
-       if (ret) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (ret)
+               goto err;
 
        return skb_chk;
+
+err:
+       if (skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
 
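
Under the new contract the provided skb is no longer consumed, so a caller frees the result only when it is a different (cloned) skb — exactly the change made to the bridge multicast receive paths earlier in this commit. A hedged usage sketch:

    #include <linux/skbuff.h>

    static int check_transport(struct sk_buff *skb, unsigned int tlen,
                               __sum16 (*chkf)(struct sk_buff *))
    {
            struct sk_buff *trimmed;
            int err = -EINVAL;

            trimmed = skb_checksum_trimmed(skb, tlen, chkf);
            if (trimmed) {
                    /* ...parse the validated packet via trimmed... */
                    err = 0;
            }

            /* Free the clone, but never the caller's skb. */
            if (trimmed && trimmed != skb)
                    kfree_skb(trimmed);

            return err;
    }
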
index a7732a06804376aa321c9982faaf981ad3c51c87..3dffce953c39fc0209e8d0f553817e972f854303 100644 (file)
@@ -301,7 +301,7 @@ out:
 EXPORT_SYMBOL(in6_pton);
 
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-                             __be32 from, __be32 to, int pseudohdr)
+                             __be32 from, __be32 to, bool pseudohdr)
 {
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                csum_replace4(sum, from, to);
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(inet_proto_csum_replace4);
 
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
-                              int pseudohdr)
+                              bool pseudohdr)
 {
        __be32 diff[] = {
                ~from[0], ~from[1], ~from[2], ~from[3],
@@ -336,6 +336,19 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
 
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+                                    __wsum diff, bool pseudohdr)
+{
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+                       skb->csum = ~csum_add(diff, ~skb->csum);
+       } else if (pseudohdr) {
+               *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+       }
+}
+EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
+
 struct __net_random_once_work {
        struct work_struct work;
        struct static_key *key;
index b445d492c115382b9ecd25cbceb3e47053263387..053eb2b8e68281e778f8f605bb3d5ad9b83ae473 100644 (file)
@@ -554,6 +554,31 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
        return 0;
 }
 
+static int dsa_of_probe_links(struct dsa_platform_data *pd,
+                             struct dsa_chip_data *cd,
+                             int chip_index, int port_index,
+                             struct device_node *port,
+                             const char *port_name)
+{
+       struct device_node *link;
+       int link_index;
+       int ret;
+
+       for (link_index = 0;; link_index++) {
+               link = of_parse_phandle(port, "link", link_index);
+               if (!link)
+                       break;
+
+               if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
+                       ret = dsa_of_setup_routing_table(pd, cd, chip_index,
+                                                        port_index, link);
+                       if (ret)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
 static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 {
        int i;
@@ -573,8 +598,8 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 static int dsa_of_probe(struct device *dev)
 {
        struct device_node *np = dev->of_node;
-       struct device_node *child, *mdio, *ethernet, *port, *link;
-       struct mii_bus *mdio_bus;
+       struct device_node *child, *mdio, *ethernet, *port;
+       struct mii_bus *mdio_bus, *mdio_bus_switch;
        struct net_device *ethernet_dev;
        struct dsa_platform_data *pd;
        struct dsa_chip_data *cd;
@@ -636,6 +661,16 @@ static int dsa_of_probe(struct device *dev)
                if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
                        cd->eeprom_len = eeprom_len;
 
+               mdio = of_parse_phandle(child, "mii-bus", 0);
+               if (mdio) {
+                       mdio_bus_switch = of_mdio_find_bus(mdio);
+                       if (!mdio_bus_switch) {
+                               ret = -EPROBE_DEFER;
+                               goto out_free_chip;
+                       }
+                       cd->host_dev = &mdio_bus_switch->dev;
+               }
+
                for_each_available_child_of_node(child, port) {
                        port_reg = of_get_property(port, "reg", NULL);
                        if (!port_reg)
@@ -658,15 +693,10 @@ static int dsa_of_probe(struct device *dev)
                                goto out_free_chip;
                        }
 
-                       link = of_parse_phandle(port, "link", 0);
-
-                       if (!strcmp(port_name, "dsa") && link &&
-                                       pd->nr_chips > 1) {
-                               ret = dsa_of_setup_routing_table(pd, cd,
-                                               chip_index, port_index, link);
-                               if (ret)
-                                       goto out_free_chip;
-                       }
+                       ret = dsa_of_probe_links(pd, cd, chip_index,
+                                                port_index, port, port_name);
+                       if (ret)
+                               goto out_free_chip;
 
                }
        }
index 0010c690cc6715838c76da2d42c93ea9dcc113fc..cce97385f7436445f22c17605b5ee4da48c80cac 100644 (file)
@@ -200,103 +200,212 @@ out:
        return 0;
 }
 
-static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-                            struct net_device *dev,
-                            const unsigned char *addr, u16 vid, u16 nlm_flags)
+static int dsa_bridge_check_vlan_range(struct dsa_switch *ds,
+                                      const struct net_device *bridge,
+                                      u16 vid_begin, u16 vid_end)
 {
+       struct dsa_slave_priv *p;
+       struct net_device *dev, *vlan_br;
+       DECLARE_BITMAP(members, DSA_MAX_PORTS);
+       DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+       u16 vid;
+       int member, err;
+
+       if (!ds->drv->vlan_getnext || !vid_begin)
+               return -EOPNOTSUPP;
+
+       vid = vid_begin - 1;
+
+       do {
+               err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+               if (err)
+                       break;
+
+               if (vid > vid_end)
+                       break;
+
+               member = find_first_bit(members, DSA_MAX_PORTS);
+               if (member == DSA_MAX_PORTS)
+                       continue;
+
+               dev = ds->ports[member];
+               p = netdev_priv(dev);
+               vlan_br = p->bridge_dev;
+               if (vlan_br == bridge)
+                       continue;
+
+               netdev_dbg(vlan_br, "hardware VLAN %d already in use\n", vid);
+               return -EOPNOTSUPP;
+       } while (vid < vid_end);
+
+       return err == -ENOENT ? 0 : err;
+}
+
+static int dsa_slave_port_vlan_add(struct net_device *dev,
+                                  struct switchdev_obj *obj)
+{
+       struct switchdev_obj_vlan *vlan = &obj->u.vlan;
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
-       int ret = -EOPNOTSUPP;
+       u16 vid;
+       int err;
 
-       if (ds->drv->fdb_add)
-               ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+       switch (obj->trans) {
+       case SWITCHDEV_TRANS_PREPARE:
+               if (!ds->drv->port_vlan_add || !ds->drv->port_pvid_set)
+                       return -EOPNOTSUPP;
 
-       return ret;
+               /* If the requested port doesn't belong to the same bridge as
+                * the VLAN members, fall back to software VLAN (hopefully).
+                */
+               err = dsa_bridge_check_vlan_range(ds, p->bridge_dev,
+                                                 vlan->vid_begin,
+                                                 vlan->vid_end);
+               if (err)
+                       return err;
+               break;
+       case SWITCHDEV_TRANS_COMMIT:
+               for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+                       err = ds->drv->port_vlan_add(ds, p->port, vid,
+                                                    vlan->flags &
+                                                    BRIDGE_VLAN_INFO_UNTAGGED);
+                       if (!err && vlan->flags & BRIDGE_VLAN_INFO_PVID)
+                               err = ds->drv->port_pvid_set(ds, p->port, vid);
+                       if (err)
+                               return err;
+               }
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
 }
 
-static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-                            struct net_device *dev,
-                            const unsigned char *addr, u16 vid)
+static int dsa_slave_port_vlan_del(struct net_device *dev,
+                                  struct switchdev_obj *obj)
 {
+       struct switchdev_obj_vlan *vlan = &obj->u.vlan;
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
-       int ret = -EOPNOTSUPP;
+       u16 vid;
+       int err;
+
+       if (!ds->drv->port_vlan_del)
+               return -EOPNOTSUPP;
 
-       if (ds->drv->fdb_del)
-               ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+               err = ds->drv->port_vlan_del(ds, p->port, vid);
+               if (err)
+                       return err;
+       }
 
-       return ret;
+       return 0;
 }
 
-static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
-                              const unsigned char *addr, u16 vid,
-                              bool is_static,
-                              u32 portid, u32 seq, int type,
-                              unsigned int flags)
+static int dsa_slave_port_vlan_dump(struct net_device *dev,
+                                   struct switchdev_obj *obj)
 {
-       struct nlmsghdr *nlh;
-       struct ndmsg *ndm;
+       struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       DECLARE_BITMAP(members, DSA_MAX_PORTS);
+       DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+       u16 pvid, vid = 0;
+       int err;
 
-       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
-       if (!nlh)
-               return -EMSGSIZE;
+       if (!ds->drv->vlan_getnext || !ds->drv->port_pvid_get)
+               return -EOPNOTSUPP;
 
-       ndm = nlmsg_data(nlh);
-       ndm->ndm_family  = AF_BRIDGE;
-       ndm->ndm_pad1    = 0;
-       ndm->ndm_pad2    = 0;
-       ndm->ndm_flags   = NTF_EXT_LEARNED;
-       ndm->ndm_type    = 0;
-       ndm->ndm_ifindex = dev->ifindex;
-       ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
+       err = ds->drv->port_pvid_get(ds, p->port, &pvid);
+       if (err)
+               return err;
 
-       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
-               goto nla_put_failure;
+       for (;;) {
+               err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+               if (err)
+                       break;
 
-       if (vid && nla_put_u16(skb, NDA_VLAN, vid))
-               goto nla_put_failure;
+               if (!test_bit(p->port, members))
+                       continue;
 
-       nlmsg_end(skb, nlh);
-       return 0;
+               memset(vlan, 0, sizeof(*vlan));
+               vlan->vid_begin = vlan->vid_end = vid;
 
-nla_put_failure:
-       nlmsg_cancel(skb, nlh);
-       return -EMSGSIZE;
+               if (vid == pvid)
+                       vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+               if (test_bit(p->port, untagged))
+                       vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+               err = obj->cb(dev, obj);
+               if (err)
+                       break;
+       }
+
+       return err == -ENOENT ? 0 : err;
 }
 
-/* Dump information about entries, in response to GETNEIGH */
-static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-                             struct net_device *dev,
-                             struct net_device *filter_dev, int idx)
+static int dsa_slave_port_fdb_add(struct net_device *dev,
+                                 struct switchdev_obj *obj)
+{
+       struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+       if (obj->trans == SWITCHDEV_TRANS_PREPARE)
+               ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
+       else if (obj->trans == SWITCHDEV_TRANS_COMMIT)
+               ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
+
+       return ret;
+}
+
+static int dsa_slave_port_fdb_del(struct net_device *dev,
+                                 struct switchdev_obj *obj)
+{
+       struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret = -EOPNOTSUPP;
+
+       if (ds->drv->port_fdb_del)
+               ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
+
+       return ret;
+}
+
+static int dsa_slave_port_fdb_dump(struct net_device *dev,
+                                  struct switchdev_obj *obj)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
        unsigned char addr[ETH_ALEN] = { 0 };
+       u16 vid = 0;
        int ret;
 
-       if (!ds->drv->fdb_getnext)
+       if (!ds->drv->port_fdb_getnext)
                return -EOPNOTSUPP;
 
-       for (; ; idx++) {
+       for (;;) {
                bool is_static;
 
-               ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+               ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
+                                               &is_static);
                if (ret < 0)
                        break;
 
-               if (idx < cb->args[0])
-                       continue;
+               obj->u.fdb.addr = addr;
+               obj->u.fdb.vid = vid;
+               obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
 
-               ret = dsa_slave_fill_info(dev, skb, addr, 0,
-                                         is_static,
-                                         NETLINK_CB(cb->skb).portid,
-                                         cb->nlh->nlmsg_seq,
-                                         RTM_NEWNEIGH, NLM_F_MULTI);
+               ret = obj->cb(dev, obj);
                if (ret < 0)
                        break;
        }
 
-       return idx;
+       return ret == -ENOENT ? 0 : ret;
 }
 
 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -364,6 +473,71 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
        return ret;
 }
 
+static int dsa_slave_port_obj_add(struct net_device *dev,
+                                 struct switchdev_obj *obj)
+{
+       int err;
+
+       /* For the prepare phase, ensure the full set of changes is feasible in
+        * one go in order to signal a failure properly. If an operation is not
+        * supported, return -EOPNOTSUPP.
+        */
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_PORT_FDB:
+               err = dsa_slave_port_fdb_add(dev, obj);
+               break;
+       case SWITCHDEV_OBJ_PORT_VLAN:
+               err = dsa_slave_port_vlan_add(dev, obj);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+static int dsa_slave_port_obj_del(struct net_device *dev,
+                                 struct switchdev_obj *obj)
+{
+       int err;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_PORT_FDB:
+               err = dsa_slave_port_fdb_del(dev, obj);
+               break;
+       case SWITCHDEV_OBJ_PORT_VLAN:
+               err = dsa_slave_port_vlan_del(dev, obj);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+static int dsa_slave_port_obj_dump(struct net_device *dev,
+                                  struct switchdev_obj *obj)
+{
+       int err;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_PORT_FDB:
+               err = dsa_slave_port_fdb_dump(dev, obj);
+               break;
+       case SWITCHDEV_OBJ_PORT_VLAN:
+               err = dsa_slave_port_vlan_dump(dev, obj);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
 static int dsa_slave_bridge_port_join(struct net_device *dev,
                                      struct net_device *br)
 {
@@ -765,9 +939,9 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_change_rx_flags    = dsa_slave_change_rx_flags,
        .ndo_set_rx_mode        = dsa_slave_set_rx_mode,
        .ndo_set_mac_address    = dsa_slave_set_mac_address,
-       .ndo_fdb_add            = dsa_slave_fdb_add,
-       .ndo_fdb_del            = dsa_slave_fdb_del,
-       .ndo_fdb_dump           = dsa_slave_fdb_dump,
+       .ndo_fdb_add            = switchdev_port_fdb_add,
+       .ndo_fdb_del            = switchdev_port_fdb_del,
+       .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_do_ioctl           = dsa_slave_ioctl,
        .ndo_get_iflink         = dsa_slave_get_iflink,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -775,11 +949,17 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
        .ndo_poll_controller    = dsa_slave_poll_controller,
 #endif
+       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
+       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
+       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
 };
 
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
        .switchdev_port_attr_get        = dsa_slave_port_attr_get,
        .switchdev_port_attr_set        = dsa_slave_port_attr_set,
+       .switchdev_port_obj_add         = dsa_slave_port_obj_add,
+       .switchdev_port_obj_del         = dsa_slave_port_obj_del,
+       .switchdev_port_obj_dump        = dsa_slave_port_obj_dump,
 };
 
 static void dsa_slave_adjust_link(struct net_device *dev)
@@ -834,7 +1014,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
                return -ENODEV;
 
        /* Use already configured phy mode */
-       p->phy_interface = p->phy->interface;
+       if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+               p->phy_interface = p->phy->interface;
        phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
                           p->phy_interface);
 
@@ -966,7 +1147,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        slave_dev->features = master->vlan_features;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
-       slave_dev->tx_queue_len = 0;
+       slave_dev->priv_flags |= IFF_NO_QUEUE;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
        slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
 
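
The obj_add path above is two-phase: switchdev calls the handler once with SWITCHDEV_TRANS_PREPARE, where the full change must be validated (or -EOPNOTSUPP returned), and again with SWITCHDEV_TRANS_COMMIT, which is expected not to fail for anything that prepared cleanly. A hedged sketch of that shape; can_apply() and apply() are hypothetical helpers:

    #include <linux/errno.h>
    #include <net/switchdev.h>

    int can_apply(struct net_device *dev, struct switchdev_obj *obj);
    int apply(struct net_device *dev, struct switchdev_obj *obj);

    static int port_obj_add(struct net_device *dev,
                            struct switchdev_obj *obj)
    {
            switch (obj->trans) {
            case SWITCHDEV_TRANS_PREPARE:
                    /* Validate everything up front so COMMIT cannot fail. */
                    return can_apply(dev, obj);
            case SWITCHDEV_TRANS_COMMIT:
                    return apply(dev, obj);
            default:
                    return -EOPNOTSUPP;
            }
    }
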
index 77e0f0e7a88e2cfd32755b1dffbe83aeb89be24b..217127c3a3ef346475a8d0f4c41e8b9da2328b0c 100644 (file)
@@ -114,7 +114,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
 EXPORT_SYMBOL(eth_header);
 
 /**
- * eth_get_headlen - determine the the length of header for an ethernet frame
+ * eth_get_headlen - determine the length of header for an ethernet frame
  * @data: pointer to start of frame
  * @len: total length of frame
  *
index 44d27469ae55982d1895021b79ba76a85c1324a8..35a9788bb3ae734d8e5b2f5199901a6c47f7a587 100644 (file)
@@ -392,7 +392,7 @@ void hsr_dev_setup(struct net_device *dev)
        dev->header_ops = &hsr_header_ops;
        dev->netdev_ops = &hsr_device_ops;
        SET_NETDEV_DEVTYPE(dev, &hsr_type);
-       dev->tx_queue_len = 0;
+       dev->priv_flags |= IFF_NO_QUEUE;
 
        dev->destructor = hsr_dev_destroy;
 
index e50f69da78eb50f86b8dee51fcfa6730f37dfad3..ea339fa94c27c5006ca8121073c5d4b69a9b8e23 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <net/ieee802154_netdev.h>
 #include <net/inet_frag.h>
+#include <net/6lowpan.h>
 
 struct lowpan_create_arg {
        u16 tag;
@@ -37,26 +38,18 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
        }
 }
 
-struct lowpan_dev_record {
-       struct net_device *ldev;
-       struct list_head list;
-};
-
 /* private device info */
 struct lowpan_dev_info {
        struct net_device       *real_dev; /* real WPAN device ptr */
-       struct mutex            dev_list_mtx; /* mutex for list ops */
        u16                     fragment_tag;
 };
 
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
-       return netdev_priv(dev);
+       return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
 }
 
-extern struct list_head lowpan_devices;
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
index f20a387a1011021347af182060f3b8f4ceda7183..953b1c49f5d1e42019752d86021fd7690272232e 100644 (file)
@@ -52,8 +52,7 @@
 
 #include "6lowpan_i.h"
 
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;
 
 static struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
@@ -91,7 +90,7 @@ static void lowpan_setup(struct net_device *dev)
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
        dev->mtu                = IPV6_MIN_MTU;
-       dev->tx_queue_len       = 0;
+       dev->priv_flags         |= IFF_NO_QUEUE;
        dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
 
@@ -114,7 +113,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[])
 {
        struct net_device *real_dev;
-       struct lowpan_dev_record *entry;
        int ret;
 
        ASSERT_RTNL();
@@ -133,67 +131,52 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
        }
 
-       lowpan_dev_info(dev)->real_dev = real_dev;
-       mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry) {
+       if (real_dev->ieee802154_ptr->lowpan_dev) {
                dev_put(real_dev);
-               lowpan_dev_info(dev)->real_dev = NULL;
-               return -ENOMEM;
+               return -EBUSY;
        }
 
-       entry->ldev = dev;
-
+       lowpan_dev_info(dev)->real_dev = real_dev;
        /* Set the lowpan hardware address to the wpan hardware address. */
        memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
 
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       INIT_LIST_HEAD(&entry->list);
-       list_add_tail(&entry->list, &lowpan_devices);
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+       lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
 
        ret = register_netdevice(dev);
-       if (ret >= 0) {
-               if (!lowpan_open_count)
-                       lowpan_rx_init();
-               lowpan_open_count++;
+       if (ret < 0) {
+               dev_put(real_dev);
+               return ret;
        }
 
-       return ret;
+       real_dev->ieee802154_ptr->lowpan_dev = dev;
+       if (!open_count)
+               lowpan_rx_init();
+
+       open_count++;
+
+       return 0;
 }
 
 static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
        struct net_device *real_dev = lowpan_dev->real_dev;
-       struct lowpan_dev_record *entry, *tmp;
 
        ASSERT_RTNL();
 
-       lowpan_open_count--;
-       if (!lowpan_open_count)
-               lowpan_rx_exit();
-
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-               if (entry->ldev == dev) {
-                       list_del(&entry->list);
-                       kfree(entry);
-               }
-       }
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+       open_count--;
 
-       mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       unregister_netdevice_queue(dev, head);
+       if (!open_count)
+               lowpan_rx_exit();
 
+       real_dev->ieee802154_ptr->lowpan_dev = NULL;
+       unregister_netdevice(dev);
        dev_put(real_dev);
 }
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
        .kind           = "lowpan",
-       .priv_size      = sizeof(struct lowpan_dev_info),
+       .priv_size      = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
        .setup          = lowpan_setup,
        .newlink        = lowpan_newlink,
        .dellink        = lowpan_dellink,
@@ -214,19 +197,21 @@ static int lowpan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       LIST_HEAD(del_list);
-       struct lowpan_dev_record *entry, *tmp;
 
        if (dev->type != ARPHRD_IEEE802154)
                goto out;
 
-       if (event == NETDEV_UNREGISTER) {
-               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
-                               lowpan_dellink(entry->ldev, &del_list);
-               }
-
-               unregister_netdevice_many(&del_list);
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               /* When a wpan interface is unregistered, also delete
+                * any lowpan interface that belongs to it.
+                */
+               if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
+                       lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+               break;
+       default:
+               break;
        }
 
 out:
index 4be1d289ab2df7581d93702d46bf3793abeba1a7..12e10201d263860d64cc7835bd947459e4d95873 100644 (file)
 
 #include "6lowpan_i.h"
 
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                     struct net_device *dev)
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
+                                    struct net_device *dev)
 {
-       struct lowpan_dev_record *entry;
-       struct sk_buff *skb_cp;
-       int stat = NET_RX_SUCCESS;
-
+       skb->dev = dev->ieee802154_ptr->lowpan_dev;
        skb->protocol = htons(ETH_P_IPV6);
        skb->pkt_type = PACKET_HOST;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(entry, &lowpan_devices, list)
-               if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-                       skb_cp = skb_copy(skb, GFP_ATOMIC);
-                       if (!skb_cp) {
-                               kfree_skb(skb);
-                               rcu_read_unlock();
-                               return NET_RX_DROP;
-                       }
-
-                       skb_cp->dev = entry->ldev;
-                       stat = netif_rx(skb_cp);
-                       if (stat == NET_RX_DROP)
-                               break;
-               }
-       rcu_read_unlock();
-
-       consume_skb(skb);
-
-       return stat;
+       return netif_rx(skb);
 }
 
 static int
@@ -89,6 +67,10 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        struct ieee802154_hdr hdr;
        int ret;
 
+       if (dev->type != ARPHRD_IEEE802154 ||
+           !dev->ieee802154_ptr->lowpan_dev)
+               goto drop;
+
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                goto drop;
@@ -99,9 +81,6 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto drop_skb;
 
-       if (dev->type != ARPHRD_IEEE802154)
-               goto drop_skb;
-
        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
                goto drop_skb;
 
@@ -109,7 +88,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
                /* Pull off the 1-byte of 6lowpan header. */
                skb_pull(skb, 1);
-               return lowpan_give_skb_to_devices(skb, NULL);
+               return lowpan_give_skb_to_device(skb, dev);
        } else {
                switch (skb->data[0] & 0xe0) {
                case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
@@ -117,7 +96,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                        if (ret < 0)
                                goto drop_skb;
 
-                       return lowpan_give_skb_to_devices(skb, NULL);
+                       return lowpan_give_skb_to_device(skb, dev);
                case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
                        ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
                        if (ret == 1) {
@@ -125,7 +104,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                                if (ret < 0)
                                        goto drop_skb;
 
-                               return lowpan_give_skb_to_devices(skb, NULL);
+                               return lowpan_give_skb_to_device(skb, dev);
                        } else if (ret == -1) {
                                return NET_RX_DROP;
                        } else {
@@ -138,7 +117,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                                if (ret < 0)
                                        goto drop_skb;
 
-                               return lowpan_give_skb_to_devices(skb, NULL);
+                               return lowpan_give_skb_to_device(skb, dev);
                        } else if (ret == -1) {
                                return NET_RX_DROP;
                        } else {
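
The receive path above replaces the per-packet walk over lowpan_devices, with one skb_copy() per listener, with a direct hand-off to the single lowpan device attached to the receiving wpan interface. A minimal kernel-context sketch, with a hypothetical helper name, of the 1:1 association this relies on; only the wpan_dev->lowpan_dev field comes from the patch:

    /* Hypothetical helper; at most one 6lowpan interface per wpan interface. */
    static int lowpan_attach(struct wpan_dev *wpan_dev, struct net_device *ldev)
    {
            if (wpan_dev->lowpan_dev)
                    return -EBUSY;
            wpan_dev->lowpan_dev = ldev;
            return 0;
    }
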
index 2597abbf7f4bbbd121486306994f5e568de4d37e..f6263fc1234056e89cd2c06046699c4fa1d75b39 100644 (file)
@@ -112,7 +112,7 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
 
        frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
        if (IS_ERR(frag))
-               return -PTR_ERR(frag);
+               return PTR_ERR(frag);
 
        memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
        memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
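
The one-character change above fixes a sign error: for an error pointer, PTR_ERR() already yields a negative errno, so negating it handed callers a positive value that would not be treated as a failure. A small userspace sketch of the pitfall; err_ptr()/ptr_err() are stand-ins for the kernel macros:

    #include <stdio.h>

    /* Userspace stand-ins for the kernel's ERR_PTR()/PTR_ERR() encoding of
     * negative errno values in pointers.
     */
    static void *err_ptr(long err)        { return (void *)err; }
    static long  ptr_err(const void *ptr) { return (long)ptr; }

    int main(void)
    {
            void *frag = err_ptr(-12);      /* -ENOMEM */

            printf("PTR_ERR(frag)  = %ld\n", ptr_err(frag));   /* -12, error    */
            printf("-PTR_ERR(frag) = %ld\n", -ptr_err(frag));  /* +12, looks ok */
            return 0;
    }
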
@@ -224,7 +224,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
        } else {
                da.mode = IEEE802154_ADDR_LONG;
                da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-               cb->ackreq = wpan_dev->frame_retries >= 0;
+               cb->ackreq = wpan_dev->ackreq;
        }
 
        return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
index 68f24016860c30c7800b36d45046cf3e7f23b8c9..1b00a14850cb5f097b557079225c55c1abadab40 100644 (file)
@@ -230,6 +230,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
        [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
 
        [NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+       [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
 };
 
 /* message building helper */
@@ -458,6 +460,7 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
        CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
        CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
        CMD(set_lbt_mode, SET_LBT_MODE);
+       CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
 
        if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
                CMD(set_tx_power, SET_TX_POWER);
@@ -656,6 +659,10 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
        if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
                goto nla_put_failure;
 
+       /* ackreq default behaviour */
+       if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+               goto nla_put_failure;
+
        genlmsg_end(msg, hdr);
        return 0;
 
@@ -1042,6 +1049,24 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
        return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+       bool ackreq;
+
+       if (netif_running(dev))
+               return -EBUSY;
+
+       if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+               return -EINVAL;
+
+       ackreq = !!nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+       return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
 #define NL802154_FLAG_NEED_WPAN_PHY    0x01
 #define NL802154_FLAG_NEED_NETDEV      0x02
 #define NL802154_FLAG_NEED_RTNL                0x04
@@ -1248,6 +1273,14 @@ static const struct genl_ops nl802154_ops[] = {
                .internal_flags = NL802154_FLAG_NEED_NETDEV |
                                  NL802154_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+               .doit = nl802154_set_ackreq_default,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_NETDEV |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
 };
 
 /* initialisation/exit functions */
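
For a driver, the new command ultimately lands in a cfg802154 operation, invoked through the rdev_set_ackreq_default() wrapper in the next file. A minimal sketch with a hypothetical driver; only the wpan_dev->ackreq field and the hook's signature follow from this patch:

    /* Hypothetical driver hook; the rest of the driver is illustrative. */
    static int foo_set_ackreq_default(struct wpan_phy *wpan_phy,
                                      struct wpan_dev *wpan_dev, bool ackreq)
    {
            wpan_dev->ackreq = ackreq;      /* read by lowpan_header() when
                                             * choosing cb->ackreq */
            return 0;
    }

    static const struct cfg802154_ops foo_cfg802154_ops = {
            /* ... mandatory ops elided ... */
            .set_ackreq_default = foo_set_ackreq_default,
    };
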
index 8d5960a37195136380032644b65a8b741a03ab00..03b357501cc55dc99c29a642d89975cbd2da87a0 100644 (file)
@@ -195,4 +195,17 @@ rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
        return ret;
 }
 
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+                       struct wpan_dev *wpan_dev, bool ackreq)
+{
+       int ret;
+
+       trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+                                            ackreq);
+       ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
 #endif /* __CFG802154_RDEV_OPS */
index 4399b7fbaa31481c402079680e3509ed05fb9479..9a471e41ec737270a2d9f364564187f2736aaf19 100644 (file)
@@ -275,6 +275,25 @@ TRACE_EVENT(802154_rdev_set_lbt_mode,
                WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
 );
 
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                bool ackreq),
+       TP_ARGS(wpan_phy, wpan_dev, ackreq),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(bool, ackreq)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->ackreq = ackreq;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+               ", ackreq default: %s", WPAN_PHY_PR_ARG,
+               WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
 TRACE_EVENT(802154_rdev_return_int,
        TP_PROTO(struct wpan_phy *wpan_phy, int ret),
        TP_ARGS(wpan_phy, ret),
index cc4e498a0ccf390115c2f6f0306248650f4a2c35..675e88cac2b469707ea49cd8e3c695e1b91776fc 100644 (file)
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
+#include <net/vrf.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -427,6 +428,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct net *net = sock_net(sk);
        unsigned short snum;
        int chk_addr_ret;
+       int tb_id = RT_TABLE_LOCAL;
        int err;
 
        /* If the socket has its own bind function then use it. (RAW) */
@@ -448,7 +450,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out;
        }
 
-       chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+       tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
+       chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
        /* Not specified by any standard per-se, however it breaks too
         * many applications when removed.  It is unfortunate since
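
The tb_id assignment uses GCC's two-operand conditional `a ?: b`, which this series leans on repeatedly: it evaluates `a` once and yields it when non-zero, falling back to `b` otherwise. A trivial standalone demonstration; vrf_table() and the table numbers are made up:

    #include <stdio.h>

    static int vrf_table(int ifindex)
    {
            return ifindex == 7 ? 1042 : 0;         /* pretend 7 is enslaved */
    }

    int main(void)
    {
            /* GNU extension: "a ?: b" evaluates a once; uses it if non-zero */
            int tb_id  = vrf_table(7) ?: 255;       /* 255 ~ RT_TABLE_LOCAL */
            int tb_id2 = vrf_table(3) ?: 255;

            printf("%d %d\n", tb_id, tb_id2);       /* prints: 1042 255 */
            return 0;
    }
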
index 34a308573f4b0e7bcae0ad9ffa5e45b19c72c1d1..30409b75e92503cca0daacc48010a63936c6aa20 100644 (file)
@@ -233,7 +233,7 @@ static int arp_constructor(struct neighbour *neigh)
                return -EINVAL;
        }
 
-       neigh->type = inet_addr_type(dev_net(dev), addr);
+       neigh->type = inet_addr_type_dev_table(dev_net(dev), dev, addr);
 
        parms = in_dev->arp_parms;
        __neigh_parms_put(neigh->parms);
@@ -343,7 +343,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
        switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
        default:
        case 0:         /* By default announce any local IP */
-               if (skb && inet_addr_type(dev_net(dev),
+               if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
                                          ip_hdr(skb)->saddr) == RTN_LOCAL)
                        saddr = ip_hdr(skb)->saddr;
                break;
@@ -351,7 +351,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
                if (!skb)
                        break;
                saddr = ip_hdr(skb)->saddr;
-               if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
+               if (inet_addr_type_dev_table(dev_net(dev), dev,
+                                            saddr) == RTN_LOCAL) {
                        /* saddr should be known to target */
                        if (inet_addr_onlink(in_dev, target, saddr))
                                break;
@@ -751,7 +752,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
        /* Special case: IPv4 duplicate address detection packet (RFC2131) */
        if (sip == 0) {
                if (arp->ar_op == htons(ARPOP_REQUEST) &&
-                   inet_addr_type(net, tip) == RTN_LOCAL &&
+                   inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
                    !arp_ignore(in_dev, sip, tip))
                        arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
                                 dev->dev_addr, sha);
@@ -811,16 +812,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
        n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
        if (IN_DEV_ARP_ACCEPT(in_dev)) {
+               unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+
                /* Unsolicited ARP is not accepted by default.
                   It is possible, that this option should be enabled for some
                   devices (strip is candidate)
                 */
                is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
-                         inet_addr_type(net, sip) == RTN_UNICAST;
+                         addr_type == RTN_UNICAST;
 
                if (!n &&
                    ((arp->ar_op == htons(ARPOP_REPLY)  &&
-                     inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
+                               addr_type == RTN_UNICAST) || is_garp))
                        n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
        }
 
index 6b98de0d79498d575a44d6c20bd3abb5a38ea75c..7fa277176c33ba95008cf1627e47ee3b2bb417e6 100644 (file)
@@ -45,6 +45,7 @@
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
+#include <net/vrf.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
 
@@ -211,12 +212,12 @@ void fib_flush_external(struct net *net)
  */
 static inline unsigned int __inet_dev_addr_type(struct net *net,
                                                const struct net_device *dev,
-                                               __be32 addr)
+                                               __be32 addr, int tb_id)
 {
        struct flowi4           fl4 = { .daddr = addr };
        struct fib_result       res;
        unsigned int ret = RTN_BROADCAST;
-       struct fib_table *local_table;
+       struct fib_table *table;
 
        if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
                return RTN_BROADCAST;
@@ -225,10 +226,10 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
 
        rcu_read_lock();
 
-       local_table = fib_get_table(net, RT_TABLE_LOCAL);
-       if (local_table) {
+       table = fib_get_table(net, tb_id);
+       if (table) {
                ret = RTN_UNICAST;
-               if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
+               if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
                        if (!dev || dev == res.fi->fib_dev)
                                ret = res.type;
                }
@@ -238,19 +239,40 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
        return ret;
 }
 
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, int tb_id)
+{
+       return __inet_dev_addr_type(net, NULL, addr, tb_id);
+}
+EXPORT_SYMBOL(inet_addr_type_table);
+
 unsigned int inet_addr_type(struct net *net, __be32 addr)
 {
-       return __inet_dev_addr_type(net, NULL, addr);
+       return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
 }
 EXPORT_SYMBOL(inet_addr_type);
 
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
                                __be32 addr)
 {
-       return __inet_dev_addr_type(net, dev, addr);
+       int rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+       return __inet_dev_addr_type(net, dev, addr, rt_table);
 }
 EXPORT_SYMBOL(inet_dev_addr_type);
 
+/* inet_addr_type with dev == NULL, but using the routing table of dev's
+ * VRF if the device is enslaved to one
+ */
+unsigned int inet_addr_type_dev_table(struct net *net,
+                                     const struct net_device *dev,
+                                     __be32 addr)
+{
+       int rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+       return __inet_dev_addr_type(net, NULL, addr, rt_table);
+}
+EXPORT_SYMBOL(inet_addr_type_dev_table);
+
 __be32 fib_compute_spec_dst(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
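
After this hunk there are four address-classification entry points; a short summary of how they differ, matching the definitions above:

    /* inet_addr_type(net, addr)
     *      - always consults RT_TABLE_LOCAL; no device is matched
     * inet_addr_type_table(net, addr, tb_id)
     *      - consults the explicit table tb_id; no device is matched
     * inet_dev_addr_type(net, dev, addr)
     *      - consults dev's VRF table (falling back to RT_TABLE_LOCAL) and
     *        additionally requires the resulting route's device to be dev
     * inet_addr_type_dev_table(net, dev, addr)
     *      - consults dev's VRF table (falling back to RT_TABLE_LOCAL) but,
     *        like the plain variants, does not match on the device itself
     */
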
@@ -309,7 +331,9 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        bool dev_match;
 
        fl4.flowi4_oif = 0;
-       fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
+       fl4.flowi4_iif = vrf_master_ifindex_rcu(dev);
+       if (!fl4.flowi4_iif)
+               fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
        fl4.daddr = src;
        fl4.saddr = dst;
        fl4.flowi4_tos = tos;
@@ -339,6 +363,9 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
                if (nh->nh_dev == dev) {
                        dev_match = true;
                        break;
+               } else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+                       dev_match = true;
+                       break;
                }
        }
 #else
@@ -496,9 +523,12 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
 
        addr = sk_extract_addr(&rt->rt_gateway);
        if (rt->rt_gateway.sa_family == AF_INET && addr) {
+               unsigned int addr_type;
+
                cfg->fc_gw = addr;
+               addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
                if (rt->rt_flags & RTF_GATEWAY &&
-                   inet_addr_type(net, addr) == RTN_UNICAST)
+                   addr_type == RTN_UNICAST)
                        cfg->fc_scope = RT_SCOPE_UNIVERSE;
        }
 
@@ -770,6 +800,7 @@ out:
 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
 {
        struct net *net = dev_net(ifa->ifa_dev->dev);
+       int tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev);
        struct fib_table *tb;
        struct fib_config cfg = {
                .fc_protocol = RTPROT_KERNEL,
@@ -784,11 +815,10 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
                },
        };
 
-       if (type == RTN_UNICAST)
-               tb = fib_new_table(net, RT_TABLE_MAIN);
-       else
-               tb = fib_new_table(net, RT_TABLE_LOCAL);
+       if (!tb_id)
+               tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
 
+       tb = fib_new_table(net, tb_id);
        if (!tb)
                return;
 
@@ -970,11 +1000,14 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
                        fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
        }
        if (!(ok & LOCAL_OK)) {
+               unsigned int addr_type;
+
                fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
 
                /* Check, that this local address finally disappeared. */
-               if (gone &&
-                   inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
+               addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
+                                                    ifa->ifa_local);
+               if (gone && addr_type != RTN_LOCAL) {
                        /* And the last, but not the least thing.
                         * We must flush stray FIB entries.
                         *
index 558e196bae0f5a10a6bc81102c657db00b726ad1..1b2d01170a4de8665b0bc5bedbb596058f15e41c 100644 (file)
@@ -511,7 +511,8 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                                        dev = __dev_get_by_index(net, cfg->fc_oif);
                                ret = lwtunnel_build_state(dev, nla_get_u16(
                                                           nla_entype),
-                                                          nla, &lwtstate);
+                                                          nla,  AF_INET, cfg,
+                                                          &lwtstate);
                                if (ret)
                                        goto errout;
                                nexthop_nh->nh_lwtstate =
@@ -533,25 +534,28 @@ errout:
 
 #endif
 
-int fib_encap_match(struct net *net, u16 encap_type,
-                   struct nlattr *encap,
-                   int oif, const struct fib_nh *nh)
+static int fib_encap_match(struct net *net, u16 encap_type,
+                          struct nlattr *encap,
+                          int oif, const struct fib_nh *nh,
+                          const struct fib_config *cfg)
 {
        struct lwtunnel_state *lwtstate;
        struct net_device *dev = NULL;
-       int ret;
+       int ret, result = 0;
 
        if (encap_type == LWTUNNEL_ENCAP_NONE)
                return 0;
 
        if (oif)
                dev = __dev_get_by_index(net, oif);
-       ret = lwtunnel_build_state(dev, encap_type,
-                                  encap, &lwtstate);
-       if (!ret)
-               return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+       ret = lwtunnel_build_state(dev, encap_type, encap,
+                                  AF_INET, cfg, &lwtstate);
+       if (!ret) {
+               result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+               lwtstate_free(lwtstate);
+       }
 
-       return 0;
+       return result;
 }
 
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
@@ -569,7 +573,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                if (cfg->fc_encap) {
                        if (fib_encap_match(net, cfg->fc_encap_type,
                                            cfg->fc_encap, cfg->fc_oif,
-                                           fi->fib_nh))
+                                           fi->fib_nh, cfg))
                            return 1;
                }
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
@@ -661,7 +665,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        struct fib_nh *nh)
 {
-       int err;
+       int err = 0;
        struct net *net;
        struct net_device *dev;
 
@@ -670,16 +674,18 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                struct fib_result res;
 
                if (nh->nh_flags & RTNH_F_ONLINK) {
+                       unsigned int addr_type;
 
                        if (cfg->fc_scope >= RT_SCOPE_LINK)
                                return -EINVAL;
-                       if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
-                               return -EINVAL;
                        dev = __dev_get_by_index(net, nh->nh_oif);
                        if (!dev)
                                return -ENODEV;
                        if (!(dev->flags & IFF_UP))
                                return -ENETDOWN;
+                       addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
+                       if (addr_type != RTN_UNICAST)
+                               return -EINVAL;
                        if (!netif_carrier_ok(dev))
                                nh->nh_flags |= RTNH_F_LINKDOWN;
                        nh->nh_dev = dev;
@@ -689,6 +695,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                }
                rcu_read_lock();
                {
+                       struct fib_table *tbl = NULL;
                        struct flowi4 fl4 = {
                                .daddr = nh->nh_gw,
                                .flowi4_scope = cfg->fc_scope + 1,
@@ -699,8 +706,24 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        /* It is not necessary, but requires a bit of thinking */
                        if (fl4.flowi4_scope < RT_SCOPE_LINK)
                                fl4.flowi4_scope = RT_SCOPE_LINK;
-                       err = fib_lookup(net, &fl4, &res,
-                                        FIB_LOOKUP_IGNORE_LINKSTATE);
+
+                       if (cfg->fc_table)
+                               tbl = fib_get_table(net, cfg->fc_table);
+
+                       if (tbl)
+                               err = fib_table_lookup(tbl, &fl4, &res,
+                                                      FIB_LOOKUP_IGNORE_LINKSTATE |
+                                                      FIB_LOOKUP_NOREF);
+
+                       /* On error, or if no table was given, do a full
+                        * lookup. This is needed, for example, when
+                        * nexthops are in the local table rather than
+                        * the given table.
+                        */
+                       if (!tbl || err) {
+                               err = fib_lookup(net, &fl4, &res,
+                                                FIB_LOOKUP_IGNORE_LINKSTATE);
+                       }
+
                        if (err) {
                                rcu_read_unlock();
                                return err;
@@ -836,6 +859,23 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
        return nh->nh_saddr;
 }
 
+static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
+{
+       if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
+           fib_prefsrc != cfg->fc_dst) {
+               int tb_id = cfg->fc_table;
+
+               if (tb_id == RT_TABLE_MAIN)
+                       tb_id = RT_TABLE_LOCAL;
+
+               if (inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+                                        fib_prefsrc, tb_id) != RTN_LOCAL) {
+                       return false;
+               }
+       }
+       return true;
+}
+
 struct fib_info *fib_create_info(struct fib_config *cfg)
 {
        int err;
@@ -967,7 +1007,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                        if (cfg->fc_oif)
                                dev = __dev_get_by_index(net, cfg->fc_oif);
                        err = lwtunnel_build_state(dev, cfg->fc_encap_type,
-                                                  cfg->fc_encap, &lwtstate);
+                                                  cfg->fc_encap, AF_INET, cfg,
+                                                  &lwtstate);
                        if (err)
                                goto failure;
 
@@ -1031,12 +1072,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                        fi->fib_flags |= RTNH_F_LINKDOWN;
        }
 
-       if (fi->fib_prefsrc) {
-               if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
-                   fi->fib_prefsrc != cfg->fc_dst)
-                       if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
-                               goto err_inval;
-       }
+       if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
+               goto err_inval;
 
        change_nexthops(fi) {
                fib_info_update_nh_saddr(net, nexthop_nh);
index 37c4bb89a7082bbe36b40d928f7fd1d95bfe8252..5154f81c53266841ae52913bda220dcf99244a8c 100644 (file)
@@ -1423,8 +1423,11 @@ found:
                            nh->nh_flags & RTNH_F_LINKDOWN &&
                            !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
                                continue;
-                       if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
-                               continue;
+                       if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+                               if (flp->flowi4_oif &&
+                                   flp->flowi4_oif != nh->nh_oif)
+                                       continue;
+                       }
 
                        if (!(fib_flags & FIB_LOOKUP_NOREF))
                                atomic_inc(&fi->fib_clntref);
@@ -2465,7 +2468,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                key = l->key + 1;
                iter->pos++;
 
-               if (pos-- <= 0)
+               if (--pos <= 0)
                        break;
 
                l = NULL;
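
The `--pos` fix above is a classic off-by-one: the post-decrement form tests the old value, so the loop visits one extra entry before stopping. A standalone illustration:

    #include <stdio.h>

    /* Count how many entries the loop visits for a requested position. */
    static int visits(int pos, int predecrement)
    {
            int n = 0;

            for (;;) {
                    n++;    /* visit one route entry */
                    if (predecrement ? (--pos <= 0) : (pos-- <= 0))
                            break;
            }
            return n;
    }

    int main(void)
    {
            /* prints "post: 4 pre: 3": post-decrement overshoots by one */
            printf("post: %d pre: %d\n", visits(3, 0), visits(3, 1));
            return 0;
    }
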
index 34968cd5c1464bf896ba1a2130fc223c50973499..2d1646cff0572054cc2982b764d817094c002146 100644 (file)
@@ -79,7 +79,11 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
-       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+       size_t plen = sizeof(struct udphdr) + hdrlen +
+           max_t(size_t, offset + sizeof(u16), start);
+
+       if (skb->remcsum_offload)
+               return guehdr;
 
        if (!pskb_may_pull(skb, plen))
                return NULL;
@@ -221,29 +225,21 @@ out_unlock:
 
 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
-                                     size_t hdrlen, u8 ipproto,
-                                     struct gro_remcsum *grc, bool nopartial)
+                                     size_t hdrlen, struct gro_remcsum *grc,
+                                     bool nopartial)
 {
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
-       size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
        if (skb->remcsum_offload)
-               return NULL;
+               return guehdr;
 
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
 
-       /* Pull checksum that will be written */
-       if (skb_gro_header_hard(skb, off + plen)) {
-               guehdr = skb_gro_header_slow(skb, off + plen, off);
-               if (!guehdr)
-                       return NULL;
-       }
-
-       skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
-                               start, offset, grc, nopartial);
+       guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
+                                        start, offset, grc, nopartial);
 
        skb->remcsum_offload = 1;
 
@@ -307,10 +303,10 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 
                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
-                                                data + doffset, hdrlen,
-                                                guehdr->proto_ctype, &grc,
+                                                data + doffset, hdrlen, &grc,
                                                 !!(fou->flags &
                                                    FOU_F_REMCSUM_NOPARTIAL));
+
                        if (!guehdr)
                                goto out;
 
@@ -351,7 +347,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
-       if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+       if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;
 
        pp = ops->callbacks.gro_receive(head, skb);
index 4a7b5b2a1ce3ddbd79e9282051020a4509dd7fe5..d9c552a721fcb19f17ac02c1972be935d8f2f11e 100644 (file)
@@ -31,7 +31,6 @@
 #include <net/xfrm.h>
 
 static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
-static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 {
@@ -61,197 +60,6 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
-                     int hdr_len)
-{
-       struct gre_base_hdr *greh;
-
-       skb_push(skb, hdr_len);
-
-       skb_reset_transport_header(skb);
-       greh = (struct gre_base_hdr *)skb->data;
-       greh->flags = tnl_flags_to_gre_flags(tpi->flags);
-       greh->protocol = tpi->proto;
-
-       if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
-               __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-               if (tpi->flags&TUNNEL_SEQ) {
-                       *ptr = tpi->seq;
-                       ptr--;
-               }
-               if (tpi->flags&TUNNEL_KEY) {
-                       *ptr = tpi->key;
-                       ptr--;
-               }
-               if (tpi->flags&TUNNEL_CSUM &&
-                   !(skb_shinfo(skb)->gso_type &
-                     (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
-                       *ptr = 0;
-                       *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
-                                                                skb->len, 0));
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(gre_build_header);
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                           bool *csum_err)
-{
-       const struct gre_base_hdr *greh;
-       __be32 *options;
-       int hdr_len;
-
-       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-               return -EINVAL;
-
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
-       if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-               return -EINVAL;
-
-       tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-       hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-       if (!pskb_may_pull(skb, hdr_len))
-               return -EINVAL;
-
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
-       tpi->proto = greh->protocol;
-
-       options = (__be32 *)(greh + 1);
-       if (greh->flags & GRE_CSUM) {
-               if (skb_checksum_simple_validate(skb)) {
-                       *csum_err = true;
-                       return -EINVAL;
-               }
-
-               skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
-                                        null_compute_pseudo);
-
-               options++;
-       }
-
-       if (greh->flags & GRE_KEY) {
-               tpi->key = *options;
-               options++;
-       } else
-               tpi->key = 0;
-
-       if (unlikely(greh->flags & GRE_SEQ)) {
-               tpi->seq = *options;
-               options++;
-       } else
-               tpi->seq = 0;
-
-       /* WCCP version 1 and 2 protocol decoding.
-        * - Change protocol to IP
-        * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-        */
-       if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-               tpi->proto = htons(ETH_P_IP);
-               if ((*(u8 *)options & 0xF0) != 0x40) {
-                       hdr_len += 4;
-                       if (!pskb_may_pull(skb, hdr_len))
-                               return -EINVAL;
-               }
-       }
-
-       return iptunnel_pull_header(skb, hdr_len, tpi->proto);
-}
-
-static int gre_cisco_rcv(struct sk_buff *skb)
-{
-       struct tnl_ptk_info tpi;
-       int i;
-       bool csum_err = false;
-
-#ifdef CONFIG_NET_IPGRE_BROADCAST
-       if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
-               /* Looped back packet, drop it! */
-               if (rt_is_output_route(skb_rtable(skb)))
-                       goto drop;
-       }
-#endif
-
-       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
-               goto drop;
-
-       rcu_read_lock();
-       for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
-               struct gre_cisco_protocol *proto;
-               int ret;
-
-               proto = rcu_dereference(gre_cisco_proto_list[i]);
-               if (!proto)
-                       continue;
-               ret = proto->handler(skb, &tpi);
-               if (ret == PACKET_RCVD) {
-                       rcu_read_unlock();
-                       return 0;
-               }
-       }
-       rcu_read_unlock();
-
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-drop:
-       kfree_skb(skb);
-       return 0;
-}
-
-static void gre_cisco_err(struct sk_buff *skb, u32 info)
-{
-       /* All the routers (except for Linux) return only
-        * 8 bytes of packet payload. It means, that precise relaying of
-        * ICMP in the real Internet is absolutely infeasible.
-        *
-        * Moreover, Cisco "wise men" put GRE key to the third word
-        * in GRE header. It makes impossible maintaining even soft
-        * state for keyed
-        * GRE tunnels with enabled checksum. Tell them "thank you".
-        *
-        * Well, I wonder, rfc1812 was written by Cisco employee,
-        * what the hell these idiots break standards established
-        * by themselves???
-        */
-
-       const int type = icmp_hdr(skb)->type;
-       const int code = icmp_hdr(skb)->code;
-       struct tnl_ptk_info tpi;
-       bool csum_err = false;
-       int i;
-
-       if (parse_gre_header(skb, &tpi, &csum_err)) {
-               if (!csum_err)          /* ignore csum errors. */
-                       return;
-       }
-
-       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                               skb->dev->ifindex, 0, IPPROTO_GRE, 0);
-               return;
-       }
-       if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
-                               IPPROTO_GRE, 0);
-               return;
-       }
-
-       rcu_read_lock();
-       for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
-               struct gre_cisco_protocol *proto;
-
-               proto = rcu_dereference(gre_cisco_proto_list[i]);
-               if (!proto)
-                       continue;
-
-               if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
-                       goto out;
-
-       }
-out:
-       rcu_read_unlock();
-}
-
 static int gre_rcv(struct sk_buff *skb)
 {
        const struct gre_protocol *proto;
@@ -302,60 +110,19 @@ static const struct net_protocol net_gre_protocol = {
        .netns_ok    = 1,
 };
 
-static const struct gre_protocol ipgre_protocol = {
-       .handler     = gre_cisco_rcv,
-       .err_handler = gre_cisco_err,
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *newp)
-{
-       struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
-                                           &gre_cisco_proto_list[newp->priority];
-
-       return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_register);
-
-int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
-{
-       struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
-                                           &gre_cisco_proto_list[del_proto->priority];
-       int ret;
-
-       ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;
-
-       if (ret)
-               return ret;
-
-       synchronize_net();
-       return 0;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_unregister);
-
 static int __init gre_init(void)
 {
        pr_info("GRE over IPv4 demultiplexor driver\n");
 
        if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
                pr_err("can't add protocol\n");
-               goto err;
-       }
-
-       if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
-               pr_info("%s: can't add ipgre handler\n", __func__);
-               goto err_gre;
+               return -EAGAIN;
        }
-
        return 0;
-err_gre:
-       inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
-err:
-       return -EAGAIN;
 }
 
 static void __exit gre_exit(void)
 {
-       gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
        inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
 }
 
index c0556f1e4bf09233970c8d5c3fd68afa9a78489f..f16488efa1c8930c23aea42a770e935fa41578b2 100644 (file)
@@ -96,6 +96,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/ip_fib.h>
+#include <net/vrf.h>
 
 /*
  *     Build xmit assembly blocks
@@ -425,6 +426,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        fl4.flowi4_mark = mark;
        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
        fl4.flowi4_proto = IPPROTO_ICMP;
+       fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
@@ -458,6 +460,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
        fl4->flowi4_proto = IPPROTO_ICMP;
        fl4->fl4_icmp_type = type;
        fl4->fl4_icmp_code = code;
+       fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
+
        security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
        rt = __ip_route_output_key(net, fl4);
        if (IS_ERR(rt))
@@ -480,7 +484,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
        if (err)
                goto relookup_failed;
 
-       if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
+       if (inet_addr_type_dev_table(net, skb_in->dev,
+                                    fl4_dec.saddr) == RTN_LOCAL) {
                rt2 = __ip_route_output_key(net, &fl4_dec);
                if (IS_ERR(rt2))
                        err = PTR_ERR(rt2);
@@ -829,7 +834,7 @@ static bool icmp_unreach(struct sk_buff *skb)
         */
 
        if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
-           inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
+           inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
                net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
                                     &ip_hdr(skb)->saddr,
                                     icmph->type, icmph->code,
index 651cdf648ec4728bff6e709b0324b7d52ffd65ed..9fdfd9deac11dde85bc62803068fbe50e45837b8 100644 (file)
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
        struct sk_buff *skb_chk;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ip_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ip_mc_check_igmp_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free the now-unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index 60021d0d9326ac691dcef21e1f9c20de5f8fe7c6..134957159c27eb9180e08b73360fe891574b4742 100644 (file)
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
        }
 
        spin_unlock(&queue->syn_wait_lock);
-       if (del_timer(&req->rsk_timer))
+       if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
 }
index d96722ae89796ef27ec725a695b60fbce4c47fbc..15762e758861b8d7101996a8faef749e2e8c7c3e 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/inet.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/inet_ecn.h>
+#include <net/vrf.h>
 
 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
  * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -77,6 +78,7 @@ struct ipq {
        u8              ecn; /* RFC3168 support */
        u16             max_df_size; /* largest frag with DF set seen */
        int             iif;
+       int             vif;   /* VRF device index */
        unsigned int    rid;
        struct inet_peer *peer;
 };
@@ -99,6 +101,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 struct ip4_create_arg {
        struct iphdr *iph;
        u32 user;
+       int vif;
 };
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
@@ -127,7 +130,8 @@ static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
                qp->saddr == arg->iph->saddr &&
                qp->daddr == arg->iph->daddr &&
                qp->protocol == arg->iph->protocol &&
-               qp->user == arg->user;
+               qp->user == arg->user &&
+               qp->vif == arg->vif;
 }
 
 static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
@@ -144,6 +148,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
        qp->ecn = ip4_frag_ecn(arg->iph->tos);
        qp->saddr = arg->iph->saddr;
        qp->daddr = arg->iph->daddr;
+       qp->vif = arg->vif;
        qp->user = arg->user;
        qp->peer = sysctl_ipfrag_max_dist ?
                inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
@@ -244,7 +249,8 @@ out:
 /* Find the correct entry in the "incomplete datagrams" queue for
  * this IP datagram, and create new one, if nothing is found.
  */
-static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+static struct ipq *ip_find(struct net *net, struct iphdr *iph,
+                          u32 user, int vif)
 {
        struct inet_frag_queue *q;
        struct ip4_create_arg arg;
@@ -252,6 +258,7 @@ static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 
        arg.iph = iph;
        arg.user = user;
+       arg.vif = vif;
 
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
@@ -648,14 +655,15 @@ out_fail:
 /* Process an incoming IP datagram fragment. */
 int ip_defrag(struct sk_buff *skb, u32 user)
 {
+       struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
+       int vif = vrf_master_ifindex_rcu(dev);
+       struct net *net = dev_net(dev);
        struct ipq *qp;
-       struct net *net;
 
-       net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Lookup (or create) queue header */
-       qp = ip_find(net, ip_hdr(skb), user);
+       qp = ip_find(net, ip_hdr(skb), user, vif);
        if (qp) {
                int ret;
 
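
Adding `vif` to the lookup key keeps fragments that arrived over different VRFs from being reassembled into one datagram, even when source, destination, IP ID, protocol and user all collide. A simplified sketch of the match; these are not the kernel structs (the real code compares fields of struct ipq, as in ip4_frag_match() above) and <linux/types.h> types are assumed:

    struct frag_key {
            __be32  saddr, daddr;
            __be16  id;
            u8      protocol;
            u32     user;
            int     vif;    /* VRF master ifindex, 0 when not enslaved */
    };

    static bool frag_key_match(const struct frag_key *a,
                               const struct frag_key *b)
    {
            return a->saddr == b->saddr && a->daddr == b->daddr &&
                   a->id == b->id && a->protocol == b->protocol &&
                   a->user == b->user && a->vif == b->vif;
    }
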
index 5fd706473c733402c9aad9c6c30466549de8c54d..1bf328182697bdaeb5bace4a8ac9e7c0d726a69d 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/udp.h>
 #include <linux/if_arp.h>
 #include <linux/mroute.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/in6.h>
 #include <linux/inetdevice.h>
@@ -47,6 +48,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/gre.h>
+#include <net/dst_metadata.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -121,8 +123,127 @@ static int ipgre_tunnel_init(struct net_device *dev);
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static int ipgre_err(struct sk_buff *skb, u32 info,
-                    const struct tnl_ptk_info *tpi)
+static int ip_gre_calc_hlen(__be16 o_flags)
+{
+       int addend = 4;
+
+       if (o_flags & TUNNEL_CSUM)
+               addend += 4;
+       if (o_flags & TUNNEL_KEY)
+               addend += 4;
+       if (o_flags & TUNNEL_SEQ)
+               addend += 4;
+       return addend;
+}
+
+static __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+       __be16 tflags = 0;
+
+       if (flags & GRE_CSUM)
+               tflags |= TUNNEL_CSUM;
+       if (flags & GRE_ROUTING)
+               tflags |= TUNNEL_ROUTING;
+       if (flags & GRE_KEY)
+               tflags |= TUNNEL_KEY;
+       if (flags & GRE_SEQ)
+               tflags |= TUNNEL_SEQ;
+       if (flags & GRE_STRICT)
+               tflags |= TUNNEL_STRICT;
+       if (flags & GRE_REC)
+               tflags |= TUNNEL_REC;
+       if (flags & GRE_VERSION)
+               tflags |= TUNNEL_VERSION;
+
+       return tflags;
+}
+
+static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+       __be16 flags = 0;
+
+       if (tflags & TUNNEL_CSUM)
+               flags |= GRE_CSUM;
+       if (tflags & TUNNEL_ROUTING)
+               flags |= GRE_ROUTING;
+       if (tflags & TUNNEL_KEY)
+               flags |= GRE_KEY;
+       if (tflags & TUNNEL_SEQ)
+               flags |= GRE_SEQ;
+       if (tflags & TUNNEL_STRICT)
+               flags |= GRE_STRICT;
+       if (tflags & TUNNEL_REC)
+               flags |= GRE_REC;
+       if (tflags & TUNNEL_VERSION)
+               flags |= GRE_VERSION;
+
+       return flags;
+}
+
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+                           bool *csum_err)
+{
+       const struct gre_base_hdr *greh;
+       __be32 *options;
+       int hdr_len;
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+               return -EINVAL;
+
+       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+               return -EINVAL;
+
+       tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+       hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+       if (!pskb_may_pull(skb, hdr_len))
+               return -EINVAL;
+
+       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       tpi->proto = greh->protocol;
+
+       options = (__be32 *)(greh + 1);
+       if (greh->flags & GRE_CSUM) {
+               if (skb_checksum_simple_validate(skb)) {
+                       *csum_err = true;
+                       return -EINVAL;
+               }
+
+               skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+                                        null_compute_pseudo);
+               options++;
+       }
+
+       if (greh->flags & GRE_KEY) {
+               tpi->key = *options;
+               options++;
+       } else {
+               tpi->key = 0;
+       }
+       if (unlikely(greh->flags & GRE_SEQ)) {
+               tpi->seq = *options;
+               options++;
+       } else {
+               tpi->seq = 0;
+       }
+       /* WCCP version 1 and 2 protocol decoding.
+        * - Change protocol to IP
+        * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+        */
+       if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+               tpi->proto = htons(ETH_P_IP);
+               if ((*(u8 *)options & 0xF0) != 0x40) {
+                       hdr_len += 4;
+                       if (!pskb_may_pull(skb, hdr_len))
+                               return -EINVAL;
+               }
+       }
+       return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+}
+
+static void ipgre_err(struct sk_buff *skb, u32 info,
+                     const struct tnl_ptk_info *tpi)
 {
 
        /* All the routers (except for Linux) return only
@@ -148,14 +269,14 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
-               return PACKET_RCVD;
+               return;
 
        case ICMP_DEST_UNREACH:
                switch (code) {
                case ICMP_SR_FAILED:
                case ICMP_PORT_UNREACH:
                        /* Impossible event. */
-                       return PACKET_RCVD;
+                       return;
                default:
                        /* All others are translated to HOST_UNREACH.
                           rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -164,9 +285,10 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
                        break;
                }
                break;
+
        case ICMP_TIME_EXCEEDED:
                if (code != ICMP_EXC_TTL)
-                       return PACKET_RCVD;
+                       return;
                break;
 
        case ICMP_REDIRECT:
@@ -183,26 +305,85 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
                             iph->daddr, iph->saddr, tpi->key);
 
        if (!t)
-               return PACKET_REJECT;
+               return;
 
        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
-               return PACKET_RCVD;
+               return;
 
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               return PACKET_RCVD;
+               return;
 
        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
-       return PACKET_RCVD;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+       /* All the routers (except for Linux) return only
+        * 8 bytes of packet payload. It means, that precise relaying of
+        * ICMP in the real Internet is absolutely infeasible.
+        *
+        * Moreover, Cisco "wise men" put GRE key to the third word
+        * in GRE header. It makes impossible maintaining even soft
+        * state for keyed
+        * GRE tunnels with enabled checksum. Tell them "thank you".
+        *
+        * Well, I wonder, rfc1812 was written by Cisco employee,
+        * what the hell these idiots break standards established
+        * by themselves???
+        */
+
+       const int type = icmp_hdr(skb)->type;
+       const int code = icmp_hdr(skb)->code;
+       struct tnl_ptk_info tpi;
+       bool csum_err = false;
+
+       if (parse_gre_header(skb, &tpi, &csum_err)) {
+               if (!csum_err)          /* ignore csum errors. */
+                       return;
+       }
+
+       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+                                skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+               return;
+       }
+       if (type == ICMP_REDIRECT) {
+               ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
+                             IPPROTO_GRE, 0);
+               return;
+       }
+
+       ipgre_err(skb, info, &tpi);
+}
+
+static __be64 key_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+       return (__force __be64)((__force u32)key);
+#else
+       return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 tunnel_id_to_key(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+       return (__force __be32)x;
+#else
+       return (__force __be32)((__force u64)x >> 32);
+#endif
 }
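
Both helpers exist so that, regardless of host endianness, the 64-bit tunnel id is simply the 32-bit GRE key zero-extended as a big-endian number. A userspace check of the byte layout, mirroring key_to_tunnel_id() above under that assumption:

    #include <endian.h>
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t key = htonl(0x12345678);       /* a __be32 GRE key */
            uint64_t tun_id;
            unsigned char b[8];

    #if __BYTE_ORDER == __BIG_ENDIAN
            tun_id = (uint64_t)key;                 /* key_to_tunnel_id() */
    #else
            tun_id = (uint64_t)key << 32;
    #endif
            memcpy(b, &tun_id, sizeof(b));
            /* Prints 00 00 00 00 12 34 56 78 on either endianness: the id
             * is the key zero-extended to a 64-bit big-endian number.
             */
            for (int i = 0; i < 8; i++)
                    printf("%02x ", b[i]);
            putchar('\n');
            return 0;
    }
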
 
 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 {
        struct net *net = dev_net(skb->dev);
+       struct metadata_dst *tun_dst = NULL;
        struct ip_tunnel_net *itn;
        const struct iphdr *iph;
        struct ip_tunnel *tunnel;
@@ -218,40 +399,194 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 
        if (tunnel) {
                skb_pop_mac_header(skb);
-               ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+               if (tunnel->collect_md) {
+                       struct ip_tunnel_info *info;
+
+                       tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
+                       if (!tun_dst)
+                               return PACKET_REJECT;
+
+                       info = &tun_dst->u.tun_info;
+                       info->key.u.ipv4.src = iph->saddr;
+                       info->key.u.ipv4.dst = iph->daddr;
+                       info->key.tos = iph->tos;
+                       info->key.ttl = iph->ttl;
+
+                       info->mode = IP_TUNNEL_INFO_RX;
+                       info->key.tun_flags = tpi->flags &
+                                             (TUNNEL_CSUM | TUNNEL_KEY);
+                       info->key.tun_id = key_to_tunnel_id(tpi->key);
+
+                       info->key.tp_src = 0;
+                       info->key.tp_dst = 0;
+               }
+
+               ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
        return PACKET_REJECT;
 }
 
+static int gre_rcv(struct sk_buff *skb)
+{
+       struct tnl_ptk_info tpi;
+       bool csum_err = false;
+
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+       if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+               /* Looped back packet, drop it! */
+               if (rt_is_output_route(skb_rtable(skb)))
+                       goto drop;
+       }
+#endif
+
+       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+               goto drop;
+
+       if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+               return 0;
+
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
+                        __be16 proto, __be32 key, __be32 seq)
+{
+       struct gre_base_hdr *greh;
+
+       skb_push(skb, hdr_len);
+
+       skb_reset_transport_header(skb);
+       greh = (struct gre_base_hdr *)skb->data;
+       greh->flags = tnl_flags_to_gre_flags(flags);
+       greh->protocol = proto;
+
+       if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+               __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+               if (flags & TUNNEL_SEQ) {
+                       *ptr = seq;
+                       ptr--;
+               }
+               if (flags & TUNNEL_KEY) {
+                       *ptr = key;
+                       ptr--;
+               }
+               if (flags & TUNNEL_CSUM &&
+                   !(skb_shinfo(skb)->gso_type &
+                     (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+                       *ptr = 0;
+                       *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+                                                                skb->len, 0));
+               }
+       }
+}
+
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                       const struct iphdr *tnl_params,
                       __be16 proto)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
-       struct tnl_ptk_info tpi;
 
-       tpi.flags = tunnel->parms.o_flags;
-       tpi.proto = proto;
-       tpi.key = tunnel->parms.o_key;
        if (tunnel->parms.o_flags & TUNNEL_SEQ)
                tunnel->o_seqno++;
-       tpi.seq = htonl(tunnel->o_seqno);
 
        /* Push GRE header. */
-       gre_build_header(skb, &tpi, tunnel->tun_hlen);
-
-       skb_set_inner_protocol(skb, tpi.proto);
+       build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+                    proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
+       skb_set_inner_protocol(skb, proto);
        ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
+static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+                                          bool csum)
+{
+       return iptunnel_handle_offloads(skb, csum,
+                                       csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip_tunnel_info *tun_info;
+       struct net *net = dev_net(dev);
+       const struct ip_tunnel_key *key;
+       struct flowi4 fl;
+       struct rtable *rt;
+       int min_headroom;
+       int tunnel_hlen;
+       __be16 df, flags;
+       int err;
+
+       tun_info = skb_tunnel_info(skb);
+       if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
+               goto err_free_skb;
+
+       key = &tun_info->key;
+       memset(&fl, 0, sizeof(fl));
+       fl.daddr = key->u.ipv4.dst;
+       fl.saddr = key->u.ipv4.src;
+       fl.flowi4_tos = RT_TOS(key->tos);
+       fl.flowi4_mark = skb->mark;
+       fl.flowi4_proto = IPPROTO_GRE;
+
+       rt = ip_route_output_key(net, &fl);
+       if (IS_ERR(rt))
+               goto err_free_skb;
+
+       tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+
+       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+                       + tunnel_hlen + sizeof(struct iphdr);
+       if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+               int head_delta = SKB_DATA_ALIGN(min_headroom -
+                                               skb_headroom(skb) +
+                                               16);
+               err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+                                      0, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto err_free_rt;
+       }
+
+       /* Push Tunnel header. */
+       skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+       if (IS_ERR(skb)) {
+               skb = NULL;
+               goto err_free_rt;
+       }
+
+       flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+       build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+                    tunnel_id_to_key(tun_info->key.tun_id), 0);
+
+       df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+       err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
+                           key->u.ipv4.dst, IPPROTO_GRE,
+                           key->tos, key->ttl, df, false);
+       iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+       return;
+
+err_free_rt:
+       ip_rt_put(rt);
+err_free_skb:
+       kfree_skb(skb);
+       dev->stats.tx_dropped++;
+}
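gre_fb_xmit() only consumes metadata; whoever hands an skb to a flow-based device must have attached IP_TUNNEL_INFO_TX state first. A sketch of that setup using the metadata_dst API from this series (the function name and the fixed TTL are illustrative choices):

static int example_set_tx_md(struct sk_buff *skb, __be32 src, __be32 dst,
                             __be64 tun_id)
{
        struct metadata_dst *md = metadata_dst_alloc(0, GFP_ATOMIC);

        if (!md)
                return -ENOMEM;

        md->u.tun_info.mode = IP_TUNNEL_INFO_TX;
        md->u.tun_info.key.u.ipv4.src = src;
        md->u.tun_info.key.u.ipv4.dst = dst;
        md->u.tun_info.key.tun_id = tun_id;
        md->u.tun_info.key.tun_flags = TUNNEL_KEY;
        md->u.tun_info.key.ttl = 64;    /* arbitrary for the example */

        skb_dst_drop(skb);
        skb_dst_set(skb, &md->dst);
        return 0;
}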
+
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                              struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *tnl_params;
 
+       if (tunnel->collect_md) {
+               gre_fb_xmit(skb, dev);
+               return NETDEV_TX_OK;
+       }
+
        if (dev->header_ops) {
                /* Need space for new headers */
                if (skb_cow_head(skb, dev->needed_headroom -
@@ -277,7 +612,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
                goto out;
 
        __gre_xmit(skb, dev, tnl_params, skb->protocol);
-
        return NETDEV_TX_OK;
 
 free_skb:
@@ -292,6 +626,11 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
+       if (tunnel->collect_md) {
+               gre_fb_xmit(skb, dev);
+               return NETDEV_TX_OK;
+       }
+
        skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
                goto out;
@@ -300,7 +639,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
                goto free_skb;
 
        __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
-
        return NETDEV_TX_OK;
 
 free_skb:
@@ -530,10 +868,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
        return ip_tunnel_init(dev);
 }
 
-static struct gre_cisco_protocol ipgre_protocol = {
-       .handler        = ipgre_rcv,
-       .err_handler    = ipgre_err,
-       .priority       = 0,
+static const struct gre_protocol ipgre_protocol = {
+       .handler     = gre_rcv,
+       .err_handler = gre_err,
 };
 
 static int __net_init ipgre_init_net(struct net *net)
@@ -596,8 +933,10 @@ out:
        return ipgre_tunnel_validate(tb, data);
 }
 
-static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
-                              struct ip_tunnel_parm *parms)
+static void ipgre_netlink_parms(struct net_device *dev,
+                               struct nlattr *data[],
+                               struct nlattr *tb[],
+                               struct ip_tunnel_parm *parms)
 {
        memset(parms, 0, sizeof(*parms));
 
@@ -635,6 +974,12 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
 
        if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
                parms->iph.frag_off = htons(IP_DF);
+
+       if (data[IFLA_GRE_COLLECT_METADATA]) {
+               struct ip_tunnel *t = netdev_priv(dev);
+
+               t->collect_md = true;
+       }
 }
 
 /* This function returns true when ENCAP attributes are present in the nl msg */
@@ -712,7 +1057,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
                        return err;
        }
 
-       ipgre_netlink_parms(data, tb, &p);
+       ipgre_netlink_parms(dev, data, tb, &p);
        return ip_tunnel_newlink(dev, tb, &p);
 }
 
@@ -730,7 +1075,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                        return err;
        }
 
-       ipgre_netlink_parms(data, tb, &p);
+       ipgre_netlink_parms(dev, data, tb, &p);
        return ip_tunnel_changelink(dev, tb, &p);
 }
 
@@ -765,6 +1110,8 @@ static size_t ipgre_get_size(const struct net_device *dev)
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_DPORT */
                nla_total_size(2) +
+               /* IFLA_GRE_COLLECT_METADATA */
+               nla_total_size(0) +
                0;
 }
 
@@ -796,6 +1143,11 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        t->encap.flags))
                goto nla_put_failure;
 
+       if (t->collect_md) {
+               if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+                       goto nla_put_failure;
+       }
+
        return 0;
 
 nla_put_failure:
@@ -817,6 +1169,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
+       [IFLA_GRE_COLLECT_METADATA]     = { .type = NLA_FLAG },
 };
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
@@ -849,9 +1202,38 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
        .get_link_net   = ip_tunnel_get_link_net,
 };
 
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+                                       u8 name_assign_type)
+{
+       struct nlattr *tb[IFLA_MAX + 1];
+       struct net_device *dev;
+       struct ip_tunnel *t;
+       int err;
+
+       memset(&tb, 0, sizeof(tb));
+
+       dev = rtnl_create_link(net, name, name_assign_type,
+                              &ipgre_tap_ops, tb);
+       if (IS_ERR(dev))
+               return dev;
+
+       /* Configure flow-based GRE device. */
+       t = netdev_priv(dev);
+       t->collect_md = true;
+
+       err = ipgre_newlink(net, dev, tb, NULL);
+       if (err < 0)
+               goto out;
+       return dev;
+out:
+       free_netdev(dev);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
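A sketch of how a kernel-side caller might use the new export to create and bring up a flow-based gretap device. Both calls assume the caller holds rtnl_lock(); everything apart from gretap_fb_dev_create() itself is illustrative:

static struct net_device *example_create_gretap(struct net *net)
{
        struct net_device *dev;
        int err;

        dev = gretap_fb_dev_create(net, "gretap_ex", NET_NAME_USER);
        if (IS_ERR(dev))
                return dev;

        err = dev_open(dev);
        if (err) {
                rtnl_delete_link(dev);
                return ERR_PTR(err);
        }
        return dev;
}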
+
 static int __net_init ipgre_tap_init_net(struct net *net)
 {
-       return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL);
+       return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
 }
 
 static void __net_exit ipgre_tap_exit_net(struct net *net)
@@ -881,7 +1263,7 @@ static int __init ipgre_init(void)
        if (err < 0)
                goto pnet_tap_failed;
 
-       err = gre_cisco_register(&ipgre_protocol);
+       err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
@@ -900,7 +1282,7 @@ static int __init ipgre_init(void)
 tap_ops_failed:
        rtnl_link_unregister(&ipgre_link_ops);
 rtnl_link_failed:
-       gre_cisco_unregister(&ipgre_protocol);
+       gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 add_proto_failed:
        unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
@@ -912,7 +1294,7 @@ static void __exit ipgre_fini(void)
 {
        rtnl_link_unregister(&ipgre_tap_ops);
        rtnl_link_unregister(&ipgre_link_ops);
-       gre_cisco_unregister(&ipgre_protocol);
+       gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
        unregister_pernet_device(&ipgre_tap_net_ops);
        unregister_pernet_device(&ipgre_net_ops);
 }
index 6bf89a6312bc1c71da41ad0a1ebdbf819504367d..0138fada0951b17b175be0b215c248fc7224dc5a 100644 (file)
@@ -1542,6 +1542,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        struct net *net = sock_net(sk);
        struct sk_buff *nskb;
        int err;
+       int oif;
 
        if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
                return;
@@ -1559,7 +1560,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                        daddr = replyopts.opt.opt.faddr;
        }
 
-       flowi4_init_output(&fl4, arg->bound_dev_if,
+       oif = arg->bound_dev_if;
+       if (!oif && netif_index_is_vrf(net, skb->skb_iif))
+               oif = skb->skb_iif;
+
+       flowi4_init_output(&fl4, oif,
                           IP4_REPLY_MARK(net, skb->mark),
                           RT_TOS(arg->tos),
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
index 626d9e56a6bd2671611f3dde4f8090b76645301c..cbb51f3fac06c66e76204957b3243f50a58384b4 100644 (file)
@@ -230,10 +230,13 @@ skip_key_lookup:
        if (cand)
                return cand;
 
+       t = rcu_dereference(itn->collect_md_tun);
+       if (t)
+               return t;
+
        if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
                return netdev_priv(itn->fb_tunnel_dev);
 
-
        return NULL;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
@@ -261,11 +264,15 @@ static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 {
        struct hlist_head *head = ip_bucket(itn, &t->parms);
 
+       if (t->collect_md)
+               rcu_assign_pointer(itn->collect_md_tun, t);
        hlist_add_head_rcu(&t->hash_node, head);
 }
 
-static void ip_tunnel_del(struct ip_tunnel *t)
+static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 {
+       if (t->collect_md)
+               rcu_assign_pointer(itn->collect_md_tun, NULL);
        hlist_del_init_rcu(&t->hash_node);
 }
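The rcu_assign_pointer() calls above publish at most one collect_md tunnel per ip_tunnel_net; readers pair them with rcu_dereference() under the read lock, as in this minimal (illustrative) check:

static bool example_has_md_tunnel(struct ip_tunnel_net *itn)
{
        bool ret;

        rcu_read_lock();
        ret = rcu_dereference(itn->collect_md_tun) != NULL;
        rcu_read_unlock();
        return ret;
}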
 
@@ -419,7 +426,8 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
 }
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-                 const struct tnl_ptk_info *tpi, bool log_ecn_error)
+                 const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+                 bool log_ecn_error)
 {
        struct pcpu_sw_netstats *tstats;
        const struct iphdr *iph = ip_hdr(skb);
@@ -478,6 +486,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                skb->dev = tunnel->dev;
        }
 
+       if (tun_dst)
+               skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;
 
@@ -806,7 +817,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
                             struct ip_tunnel_parm *p,
                             bool set_mtu)
 {
-       ip_tunnel_del(t);
+       ip_tunnel_del(itn, t);
        t->parms.iph.saddr = p->iph.saddr;
        t->parms.iph.daddr = p->iph.daddr;
        t->parms.i_key = p->i_key;
@@ -967,7 +978,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
        itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
 
        if (itn->fb_tunnel_dev != dev) {
-               ip_tunnel_del(netdev_priv(dev));
+               ip_tunnel_del(itn, netdev_priv(dev));
                unregister_netdevice_queue(dev, head);
        }
 }
@@ -1072,8 +1083,13 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
        nt = netdev_priv(dev);
        itn = net_generic(net, nt->ip_tnl_net_id);
 
-       if (ip_tunnel_find(itn, p, dev->type))
-               return -EEXIST;
+       if (nt->collect_md) {
+               if (rtnl_dereference(itn->collect_md_tun))
+                       return -EEXIST;
+       } else {
+               if (ip_tunnel_find(itn, p, dev->type))
+                       return -EEXIST;
+       }
 
        nt->net = net;
        nt->parms = *p;
@@ -1089,7 +1105,6 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                dev->mtu = mtu;
 
        ip_tunnel_add(itn, nt);
-
 out:
        return err;
 }
@@ -1163,6 +1178,10 @@ int ip_tunnel_init(struct net_device *dev)
        iph->version            = 4;
        iph->ihl                = 5;
 
+       if (tunnel->collect_md) {
+               dev->features |= NETIF_F_NETNS_LOCAL;
+               netif_keep_dst(dev);
+       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1176,7 +1195,7 @@ void ip_tunnel_uninit(struct net_device *dev)
        itn = net_generic(net, tunnel->ip_tnl_net_id);
        /* fb_tunnel_dev will be unregistered in the net-exit path. */
        if (itn->fb_tunnel_dev != dev)
-               ip_tunnel_del(netdev_priv(dev));
+               ip_tunnel_del(itn, netdev_priv(dev));
 
        ip_tunnel_dst_reset_all(tunnel);
 }
index 5512f4e4ec1b1b629a9c1fe1c7ac2ac4ffced248..934f2ac8ad610838b8a6611a4fecac5849f3afef 100644 (file)
@@ -192,26 +192,27 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
 
-static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
-       [IP_TUN_ID]             = { .type = NLA_U64 },
-       [IP_TUN_DST]            = { .type = NLA_U32 },
-       [IP_TUN_SRC]            = { .type = NLA_U32 },
-       [IP_TUN_TTL]            = { .type = NLA_U8 },
-       [IP_TUN_TOS]            = { .type = NLA_U8 },
-       [IP_TUN_SPORT]          = { .type = NLA_U16 },
-       [IP_TUN_DPORT]          = { .type = NLA_U16 },
-       [IP_TUN_FLAGS]          = { .type = NLA_U16 },
+static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+       [LWTUNNEL_IP_ID]        = { .type = NLA_U64 },
+       [LWTUNNEL_IP_DST]       = { .type = NLA_U32 },
+       [LWTUNNEL_IP_SRC]       = { .type = NLA_U32 },
+       [LWTUNNEL_IP_TTL]       = { .type = NLA_U8 },
+       [LWTUNNEL_IP_TOS]       = { .type = NLA_U8 },
+       [LWTUNNEL_IP_SPORT]     = { .type = NLA_U16 },
+       [LWTUNNEL_IP_DPORT]     = { .type = NLA_U16 },
+       [LWTUNNEL_IP_FLAGS]     = { .type = NLA_U16 },
 };
 
 static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                             unsigned int family, const void *cfg,
                              struct lwtunnel_state **ts)
 {
        struct ip_tunnel_info *tun_info;
        struct lwtunnel_state *new_state;
-       struct nlattr *tb[IP_TUN_MAX + 1];
+       struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
        int err;
 
-       err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+       err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
        if (err < 0)
                return err;
 
@@ -223,29 +224,29 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
 
        tun_info = lwt_tun_info(new_state);
 
-       if (tb[IP_TUN_ID])
-               tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+       if (tb[LWTUNNEL_IP_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
 
-       if (tb[IP_TUN_DST])
-               tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+       if (tb[LWTUNNEL_IP_DST])
+               tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
 
-       if (tb[IP_TUN_SRC])
-               tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+       if (tb[LWTUNNEL_IP_SRC])
+               tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
 
-       if (tb[IP_TUN_TTL])
-               tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+       if (tb[LWTUNNEL_IP_TTL])
+               tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
 
-       if (tb[IP_TUN_TOS])
-               tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+       if (tb[LWTUNNEL_IP_TOS])
+               tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
 
-       if (tb[IP_TUN_SPORT])
-               tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+       if (tb[LWTUNNEL_IP_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
 
-       if (tb[IP_TUN_DPORT])
-               tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+       if (tb[LWTUNNEL_IP_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
 
-       if (tb[IP_TUN_FLAGS])
-               tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+       if (tb[LWTUNNEL_IP_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
 
        tun_info->mode = IP_TUNNEL_INFO_TX;
        tun_info->options = NULL;
@@ -261,14 +262,14 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 {
        struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
 
-       if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
-           nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
-           nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
-           nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
-           nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
-           nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
-           nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
-           nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+       if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+           nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+           nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+           nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+           nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
+           nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
                return -ENOMEM;
 
        return 0;
@@ -276,25 +277,135 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 
 static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       return nla_total_size(8)        /* IP_TUN_ID */
-               + nla_total_size(4)     /* IP_TUN_DST */
-               + nla_total_size(4)     /* IP_TUN_SRC */
-               + nla_total_size(1)     /* IP_TUN_TOS */
-               + nla_total_size(1)     /* IP_TUN_TTL */
-               + nla_total_size(2)     /* IP_TUN_SPORT */
-               + nla_total_size(2)     /* IP_TUN_DPORT */
-               + nla_total_size(2);    /* IP_TUN_FLAGS */
+       return nla_total_size(8)        /* LWTUNNEL_IP_ID */
+               + nla_total_size(4)     /* LWTUNNEL_IP_DST */
+               + nla_total_size(4)     /* LWTUNNEL_IP_SRC */
+               + nla_total_size(1)     /* LWTUNNEL_IP_TOS */
+               + nla_total_size(1)     /* LWTUNNEL_IP_TTL */
+               + nla_total_size(2)     /* LWTUNNEL_IP_SPORT */
+               + nla_total_size(2)     /* LWTUNNEL_IP_DPORT */
+               + nla_total_size(2);    /* LWTUNNEL_IP_FLAGS */
+}
+
+static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       return memcmp(lwt_tun_info(a), lwt_tun_info(b),
+                     sizeof(struct ip_tunnel_info));
 }
 
 static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
        .build_state = ip_tun_build_state,
        .fill_encap = ip_tun_fill_encap_info,
        .get_encap_size = ip_tun_encap_nlsize,
+       .cmp_encap = ip_tun_cmp_encap,
+};
+
+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+       [LWTUNNEL_IP6_ID]               = { .type = NLA_U64 },
+       [LWTUNNEL_IP6_DST]              = { .len = sizeof(struct in6_addr) },
+       [LWTUNNEL_IP6_SRC]              = { .len = sizeof(struct in6_addr) },
+       [LWTUNNEL_IP6_HOPLIMIT]         = { .type = NLA_U8 },
+       [LWTUNNEL_IP6_TC]               = { .type = NLA_U8 },
+       [LWTUNNEL_IP6_SPORT]            = { .type = NLA_U16 },
+       [LWTUNNEL_IP6_DPORT]            = { .type = NLA_U16 },
+       [LWTUNNEL_IP6_FLAGS]            = { .type = NLA_U16 },
+};
+
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                              unsigned int family, const void *cfg,
+                              struct lwtunnel_state **ts)
+{
+       struct ip_tunnel_info *tun_info;
+       struct lwtunnel_state *new_state;
+       struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+       if (err < 0)
+               return err;
+
+       new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+       if (!new_state)
+               return -ENOMEM;
+
+       new_state->type = LWTUNNEL_ENCAP_IP6;
+
+       tun_info = lwt_tun_info(new_state);
+
+       if (tb[LWTUNNEL_IP6_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+       if (tb[LWTUNNEL_IP6_DST])
+               tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+       if (tb[LWTUNNEL_IP6_SRC])
+               tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+       if (tb[LWTUNNEL_IP6_HOPLIMIT])
+               tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+       if (tb[LWTUNNEL_IP6_TC])
+               tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+       if (tb[LWTUNNEL_IP6_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
+
+       if (tb[LWTUNNEL_IP6_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
+
+       if (tb[LWTUNNEL_IP6_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->options = NULL;
+       tun_info->options_len = 0;
+
+       *ts = new_state;
+
+       return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+                                  struct lwtunnel_state *lwtstate)
+{
+       struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+       if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+           nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+           nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+           nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
+           nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
+           nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       return nla_total_size(8)        /* LWTUNNEL_IP6_ID */
+               + nla_total_size(16)    /* LWTUNNEL_IP6_DST */
+               + nla_total_size(16)    /* LWTUNNEL_IP6_SRC */
+               + nla_total_size(1)     /* LWTUNNEL_IP6_HOPLIMIT */
+               + nla_total_size(1)     /* LWTUNNEL_IP6_TC */
+               + nla_total_size(2)     /* LWTUNNEL_IP6_SPORT */
+               + nla_total_size(2)     /* LWTUNNEL_IP6_DPORT */
+               + nla_total_size(2);    /* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+       .build_state = ip6_tun_build_state,
+       .fill_encap = ip6_tun_fill_encap_info,
+       .get_encap_size = ip6_tun_encap_nlsize,
+       .cmp_encap = ip_tun_cmp_encap,
 };
 
 void __init ip_tunnel_core_init(void)
 {
        lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+       lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
 }
 
 struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
index 8e7328c6a390a9bc064a67e4ca4263c891239378..ed4ef09c2136ec34871fc240a8197bf694e2d9ef 100644 (file)
@@ -94,7 +94,7 @@
 /* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
 #define CONF_OPEN_RETRIES      2       /* (Re)open devices twice */
 #define CONF_SEND_RETRIES      6       /* Send six requests per open */
-#define CONF_INTER_TIMEOUT     (HZ/2)  /* Inter-device timeout: 1/2 second */
+#define CONF_INTER_TIMEOUT     (HZ)    /* Inter-device timeout: 1 second */
 #define CONF_BASE_TIMEOUT      (HZ*2)  /* Initial timeout: 2 seconds */
 #define CONF_TIMEOUT_RANDOM    (HZ)    /* Maximum amount of randomization */
 #define CONF_TIMEOUT_MULT      *7/4    /* Rate of timeout growth */
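For reference, CONF_TIMEOUT_MULT is pasted in as an operator suffix, so each resend timeout grows by 7/4 from the 2-second base (2s, 3.5s, about 6.1s, and so on); the cap and jitter applied elsewhere in this file are assumed, not shown:

static inline unsigned long example_next_conf_timeout(unsigned long t)
{
        return t * 7 / 4;       /* i.e. "t CONF_TIMEOUT_MULT" after pasting */
}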
index 254238daf58bd9f6e398609b67ca6a0b946600d2..f34c31defafe083fcc9146affff5c39745150d04 100644 (file)
@@ -198,7 +198,7 @@ static int ipip_rcv(struct sk_buff *skb)
                        goto drop;
                if (iptunnel_pull_header(skb, 0, tpi.proto))
                        goto drop;
-               return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+               return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
        }
 
        return -1;
index 2199a5db25e60412389d861a105d86c62100b396..690d27d3f2f90d99612de8ed4a32dec0596a680a 100644 (file)
@@ -58,6 +58,12 @@ config NFT_REJECT_IPV4
        default NFT_REJECT
        tristate
 
+config NFT_DUP_IPV4
+       tristate "IPv4 nf_tables packet duplication support"
+       select NF_DUP_IPV4
+       help
+         This module enables IPv4 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV4
 
 config NF_TABLES_ARP
@@ -67,6 +73,12 @@ config NF_TABLES_ARP
 
 endif # NF_TABLES
 
+config NF_DUP_IPV4
+       tristate "Netfilter IPv4 packet duplication to alternate destination"
+       help
+         This option enables the nf_dup_ipv4 core, which duplicates an
+         IPv4 packet and reroutes the copy to another destination.
+
 config NF_LOG_ARP
        tristate "ARP packet logging"
        default m if NETFILTER_ADVANCED=n
index 7fe6c703528f79f3ba6d355724c26f32e20a21c5..87b073da14c928df176e7f4163e5301edcafdd89 100644 (file)
@@ -41,6 +41,7 @@ obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
 obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
 obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
+obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
 # generic IP tables 
@@ -70,3 +71,5 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
 
 # just filtering instance of ARP tables for now
 obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+obj-$(CONFIG_NF_DUP_IPV4) += nf_dup_ipv4.o
index 4bf3dc49ad1ea84d59815cb9a46aa3cd3fd374b5..270765236f5e8cc9e39c02f9b6fa0836f853f96d 100644 (file)
@@ -72,7 +72,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
                tcph->cwr = einfo->proto.tcp.cwr;
 
        inet_proto_csum_replace2(&tcph->check, skb,
-                                oldval, ((__be16 *)tcph)[6], 0);
+                                oldval, ((__be16 *)tcph)[6], false);
        return true;
 }
 
index fe8cc183411e052f6e0ba4afefbeaef1e77313cd..95ea633e8356eb9b419e4027f9954810194aa23c 100644 (file)
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
index 30ad9554b5e9931ad37329f0ffda6a8aacdf55d1..8a2caaf3940bedaa9abba13352594a29341d287e 100644 (file)
@@ -280,7 +280,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
                return -EINVAL;
        }
 
-       h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+       h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
        if (h) {
                struct sockaddr_in sin;
                struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
index 80d5554b9a88da301a69db2df7d98f8a3a0a0348..cdde3ec496e94321c424d3dd37b31cb305e05451 100644 (file)
@@ -134,9 +134,11 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        struct nf_conntrack_tuple innertuple, origtuple;
        const struct nf_conntrack_l4proto *innerproto;
        const struct nf_conntrack_tuple_hash *h;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       const struct nf_conntrack_zone *zone;
+       struct nf_conntrack_zone tmp;
 
        NF_CT_ASSERT(skb->nfct == NULL);
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
        /* Are they talking about one of our connections? */
        if (!nf_ct_get_tuplepr(skb,
index b69e82bda2159464b2eb0b0fd7c184c605947cc8..9306ec4fab41e9fa0c3c99fd6be78bcf8adfb397 100644 (file)
@@ -43,19 +43,22 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
                                              struct sk_buff *skb)
 {
-       u16 zone = NF_CT_DEFAULT_ZONE;
-
+       u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       if (skb->nfct)
-               zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+       if (skb->nfct) {
+               enum ip_conntrack_info ctinfo;
+               const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+               zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+       }
 #endif
        if (nf_bridge_in_prerouting(skb))
-               return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+               return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
 
        if (hooknum == NF_INET_PRE_ROUTING)
-               return IP_DEFRAG_CONNTRACK_IN + zone;
+               return IP_DEFRAG_CONNTRACK_IN + zone_id;
        else
-               return IP_DEFRAG_CONNTRACK_OUT + zone;
+               return IP_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
new file mode 100644 (file)
index 0000000..b5bb375
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+       const struct dst_entry *dst;
+
+       if (skb->dev != NULL)
+               return dev_net(skb->dev);
+       dst = skb_dst(skb);
+       if (dst != NULL && dst->dev != NULL)
+               return dev_net(dst->dev);
+#endif
+       return &init_net;
+}
+
+static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
+                             int oif)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct net *net = pick_net(skb);
+       struct rtable *rt;
+       struct flowi4 fl4;
+
+       memset(&fl4, 0, sizeof(fl4));
+       if (oif != -1)
+               fl4.flowi4_oif = oif;
+
+       fl4.daddr = gw->s_addr;
+       fl4.flowi4_tos = RT_TOS(iph->tos);
+       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt))
+               return false;
+
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
+       skb->dev      = rt->dst.dev;
+       skb->protocol = htons(ETH_P_IP);
+
+       return true;
+}
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+                const struct in_addr *gw, int oif)
+{
+       struct iphdr *iph;
+
+       if (this_cpu_read(nf_skb_duplicated))
+               return;
+       /*
+        * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+        * the original skb, which should continue on its way as if nothing has
+        * happened. The copy should be independently delivered to the gateway.
+        */
+       skb = pskb_copy(skb, GFP_ATOMIC);
+       if (skb == NULL)
+               return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       /* Avoid counting cloned packets towards the original connection. */
+       nf_conntrack_put(skb->nfct);
+       skb->nfct     = &nf_ct_untracked_get()->ct_general;
+       skb->nfctinfo = IP_CT_NEW;
+       nf_conntrack_get(skb->nfct);
+#endif
+       /*
+        * If we are in PREROUTING/INPUT, the checksum must be recalculated
+        * since the length could have changed as a result of defragmentation.
+        *
+        * We also decrease the TTL to mitigate potential loops between two
+        * hosts.
+        *
+        * Set %IP_DF so that the original source is notified of a potentially
+        * decreased MTU on the clone route. IPv6 does this too.
+        */
+       iph = ip_hdr(skb);
+       iph->frag_off |= htons(IP_DF);
+       if (hooknum == NF_INET_PRE_ROUTING ||
+           hooknum == NF_INET_LOCAL_IN)
+               --iph->ttl;
+       ip_send_check(iph);
+
+       if (nf_dup_ipv4_route(skb, gw, oif)) {
+               __this_cpu_write(nf_skb_duplicated, true);
+               ip_local_out(skb);
+               __this_cpu_write(nf_skb_duplicated, false);
+       } else {
+               kfree_skb(skb);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
+MODULE_LICENSE("GPL");
index e59cc05c09e96c8f6996e5e0063c4d138d0dee11..22f4579b0c2aeba3ad637e4cff756042e86e4244 100644 (file)
@@ -120,7 +120,7 @@ static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
                oldip = iph->daddr;
                newip = t->dst.u3.ip;
        }
-       inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+       inet_proto_csum_replace4(check, skb, oldip, newip, true);
 }
 
 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
@@ -151,7 +151,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
                }
        } else
                inet_proto_csum_replace2(check, skb,
-                                        htons(oldlen), htons(datalen), 1);
+                                        htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 4557b4ab8342740696b5fa4d3de8c6218ed70186..7b98baa13edeb1e9b944df54cffaa9ba669cef77 100644 (file)
@@ -67,7 +67,7 @@ icmp_manip_pkt(struct sk_buff *skb,
 
        hdr = (struct icmphdr *)(skb->data + hdroff);
        inet_proto_csum_replace2(&hdr->checksum, skb,
-                                hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+                                hdr->un.echo.id, tuple->src.u.icmp.id, false);
        hdr->un.echo.id = tuple->src.u.icmp.id;
        return true;
 }
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
new file mode 100644 (file)
index 0000000..25419fb
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+struct nft_dup_ipv4 {
+       enum nft_registers      sreg_addr:8;
+       enum nft_registers      sreg_dev:8;
+};
+
+static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+                             struct nft_regs *regs,
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+       struct in_addr gw = {
+               .s_addr = regs->data[priv->sreg_addr],
+       };
+       int oif = regs->data[priv->sreg_dev];
+
+       nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+}
+
+static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+               return -EINVAL;
+
+       priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+       err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+               priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+               return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+       }
+       return 0;
+}
+
+static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+
+       if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+           nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv4_type;
+static const struct nft_expr_ops nft_dup_ipv4_ops = {
+       .type           = &nft_dup_ipv4_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
+       .eval           = nft_dup_ipv4_eval,
+       .init           = nft_dup_ipv4_init,
+       .dump           = nft_dup_ipv4_dump,
+};
+
+static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
+       [NFTA_DUP_SREG_ADDR]    = { .type = NLA_U32 },
+       [NFTA_DUP_SREG_DEV]     = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
+       .family         = NFPROTO_IPV4,
+       .name           = "dup",
+       .ops            = &nft_dup_ipv4_ops,
+       .policy         = nft_dup_ipv4_policy,
+       .maxattr        = NFTA_DUP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv4_module_init(void)
+{
+       return nft_register_expr(&nft_dup_ipv4_type);
+}
+
+static void __exit nft_dup_ipv4_module_exit(void)
+{
+       nft_unregister_expr(&nft_dup_ipv4_type);
+}
+
+module_init(nft_dup_ipv4_module_init);
+module_exit(nft_dup_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
index 18fd7c9095c706c13240b4624aa291ead0b0171a..f3087aaa6dd86c7aad92814b53aa6cc6494292a1 100644 (file)
 #endif
 #include <net/secure_seq.h>
 #include <net/ip_tunnels.h>
+#include <net/vrf.h>
 
 #define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -1358,7 +1359,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
        }
-       lwtstate_put(rt->rt_lwtstate);
 }
 
 void rt_flush_dev(struct net_device *dev)
@@ -1407,7 +1407,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
-               rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
+               rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
                if (unlikely(fnhe))
                        cached = rt_bind_exception(rt, fnhe, daddr);
                else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1493,7 +1493,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
        if (our) {
                rth->dst.input= ip_local_deliver;
                rth->rt_flags |= RTCF_LOCAL;
@@ -1623,15 +1622,20 @@ static int __mkroute_input(struct sk_buff *skb,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(in_slow_tot);
 
        rth->dst.input = ip_forward;
        rth->dst.output = ip_output;
 
        rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
-       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+       if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+               rth->dst.lwtstate->orig_output = rth->dst.output;
                rth->dst.output = lwtunnel_output;
+       }
+       if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+               rth->dst.lwtstate->orig_input = rth->dst.input;
+               rth->dst.input = lwtunnel_input;
+       }
        skb_dst_set(skb, &rth->dst);
 out:
        err = 0;
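The redirect above saves the routed handler in orig_output/orig_input before substituting the lwtunnel entry points; the dispatch those entry points are assumed to perform looks roughly like this (simplified, see net/core/lwtunnel.c for the real version):

static int example_lwt_output(struct sock *sk, struct sk_buff *skb)
{
        struct lwtunnel_state *lwtstate = skb_dst(skb)->lwtstate;

        /* encap-specific work (e.g. attaching tunnel metadata) runs here */

        return lwtstate->orig_output(sk, skb);
}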
@@ -1688,7 +1692,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
           by fib_lookup.
         */
 
-       tun_info = skb_tunnel_info(skb, AF_INET);
+       tun_info = skb_tunnel_info(skb);
        if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
                fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
        else
@@ -1726,7 +1730,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         *      Now we are ready to route packet.
         */
        fl4.flowi4_oif = 0;
-       fl4.flowi4_iif = dev->ifindex;
+       fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
@@ -1808,7 +1812,6 @@ local_input:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
 
        RT_CACHE_STAT_INC(in_slow_tot);
        if (res.type == RTN_UNREACHABLE) {
@@ -1999,7 +2002,6 @@ add:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(out_slow_tot);
 
        if (flags & RTCF_LOCAL)
@@ -2022,7 +2024,7 @@ add:
        }
 
        rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
-       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+       if (lwtunnel_output_redirect(rth->dst.lwtstate))
                rth->dst.output = lwtunnel_output;
 
        return rth;
@@ -2130,6 +2132,11 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                                fl4->saddr = inet_select_addr(dev_out, 0,
                                                              RT_SCOPE_HOST);
                }
+               if (netif_is_vrf(dev_out) &&
+                   !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+                       rth = vrf_dev_get_rth(dev_out);
+                       goto out;
+               }
        }
 
        if (!fl4->daddr) {
@@ -2281,7 +2288,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_uses_gateway = ort->rt_uses_gateway;
 
                INIT_LIST_HEAD(&rt->rt_uncached);
-               rt->rt_lwtstate = NULL;
                dst_free(new);
        }
 
index 433231ccfb17fc6d01179247d1d81226803d18df..0330ab2e2b6329ced120cd9b7100a5a34f50e82b 100644 (file)
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_wmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_rmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_rmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one
        },
        {
                .procname       = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_wmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one
        },
        { }
 };
index d27eb549ced6b4bba76fcd3a4286c8ab0b41478f..93898e093d4e655537665602337de3c6c35cab70 100644 (file)
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk)
+               if (!nsk || nsk == sk)
                        reqsk_put(req);
                return nsk;
        }
index 7d1efa762b75b04e982c14da36c12595b38dc880..444ab5beecbd0a35355f312152ef6349d9fa9cf0 100644 (file)
@@ -2149,7 +2149,7 @@ repair:
                tcp_cwnd_validate(sk, is_cwnd_limited);
                return false;
        }
-       return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+       return !tp->packets_out && tcp_send_head(sk);
 }
 
 bool tcp_schedule_loss_probe(struct sock *sk)
@@ -2226,7 +2226,7 @@ static bool skb_still_in_host_queue(const struct sock *sk,
        return false;
 }
 
-/* When probe timeout (PTO) fires, send a new segment if one exists, else
+/* When probe timeout (PTO) fires, try to send a new segment if possible, else
  * retransmit the last segment.
  */
 void tcp_send_loss_probe(struct sock *sk)
@@ -2235,11 +2235,19 @@ void tcp_send_loss_probe(struct sock *sk)
        struct sk_buff *skb;
        int pcount;
        int mss = tcp_current_mss(sk);
-       int err = -1;
 
-       if (tcp_send_head(sk)) {
-               err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
-               goto rearm_timer;
+       skb = tcp_send_head(sk);
+       if (skb) {
+               if (tcp_snd_wnd_test(tp, skb, mss)) {
+                       pcount = tp->packets_out;
+                       tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+                       if (tp->packets_out > pcount)
+                               goto probe_sent;
+                       goto rearm_timer;
+               }
+               skb = tcp_write_queue_prev(sk, skb);
+       } else {
+               skb = tcp_write_queue_tail(sk);
        }
 
        /* At most one outstanding TLP retransmission. */
@@ -2247,7 +2255,6 @@ void tcp_send_loss_probe(struct sock *sk)
                goto rearm_timer;
 
        /* Retransmit last segment. */
-       skb = tcp_write_queue_tail(sk);
        if (WARN_ON(!skb))
                goto rearm_timer;
 
@@ -2262,26 +2269,24 @@ void tcp_send_loss_probe(struct sock *sk)
                if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
                                          GFP_ATOMIC)))
                        goto rearm_timer;
-               skb = tcp_write_queue_tail(sk);
+               skb = tcp_write_queue_next(sk, skb);
        }
 
        if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
                goto rearm_timer;
 
-       err = __tcp_retransmit_skb(sk, skb);
+       if (__tcp_retransmit_skb(sk, skb))
+               goto rearm_timer;
 
        /* Record snd_nxt for loss detection. */
-       if (likely(!err))
-               tp->tlp_high_seq = tp->snd_nxt;
+       tp->tlp_high_seq = tp->snd_nxt;
 
+probe_sent:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+       /* Reset so that tcp_rearm_rto() will restart the timer from now */
+       inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
-       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-                                 inet_csk(sk)->icsk_rto,
-                                 TCP_RTO_MAX);
-
-       if (likely(!err))
-               NET_INC_STATS_BH(sock_net(sk),
-                                LINUX_MIB_TCPLOSSPROBES);
+       tcp_rearm_rto(sk);
 }
 
 /* Push out any pending frames which were held back due to
index 83aa604f9273c332c5a0e5399253d961ef92eb9a..c0a15e7f359fe54e4edcffca5d59acb418dad116 100644 (file)
@@ -1013,11 +1013,31 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        if (!rt) {
                struct net *net = sock_net(sk);
+               __u8 flow_flags = inet_sk_flowi_flags(sk);
 
                fl4 = &fl4_stack;
+
+               /* unconnected socket. If output device is enslaved to a VRF
+                * device lookup source address from VRF table. This mimics
+                * behavior of ip_route_connect{_init}.
+                */
+               if (netif_index_is_vrf(net, ipc.oif)) {
+                       flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
+                                          RT_SCOPE_UNIVERSE, sk->sk_protocol,
+                                          (flow_flags | FLOWI_FLAG_VRFSRC),
+                                          faddr, saddr, dport,
+                                          inet->inet_sport);
+
+                       rt = ip_route_output_flow(net, fl4, sk);
+                       if (!IS_ERR(rt)) {
+                               saddr = fl4->saddr;
+                               ip_rt_put(rt);
+                       }
+               }
+
                flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
                                   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-                                  inet_sk_flowi_flags(sk),
+                                  flow_flags,
                                   faddr, saddr, dport, inet->inet_sport);
 
                security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
@@ -1995,12 +2015,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = sk->sk_rx_dst;
+       dst = READ_ONCE(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, 0);
-       if (dst)
-               skb_dst_set_noref(skb, dst);
+       if (dst) {
+               /* DST_NOCACHE cannot be used without taking a reference */
+               if (dst->flags & DST_NOCACHE) {
+                       if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+                               skb_dst_set(skb, dst);
+               } else {
+                       skb_dst_set_noref(skb, dst);
+               }
+       }
 }
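The DST_NOCACHE branch above exists because a noref dst of that kind can be freed while the skb still points at it; the reference-or-bail step is small enough to read as a helper (the helper name is illustrative, not in-tree):

static bool example_dst_take_ref(struct dst_entry *dst)
{
        /* fails if the last reference is already being dropped */
        return likely(atomic_inc_not_zero(&dst->__refcnt));
}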
 
 int udp_rcv(struct sk_buff *skb)
index bff69746e05f05d936ec8f7a62c34d3f87a55d10..55b3c0f4dde5ba45c0840542f1308613b6f06b8f 100644 (file)
@@ -19,7 +19,7 @@
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
-                                           int tos,
+                                           int tos, int oif,
                                            const xfrm_address_t *saddr,
                                            const xfrm_address_t *daddr)
 {
@@ -28,6 +28,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        memset(fl4, 0, sizeof(*fl4));
        fl4->daddr = daddr->a4;
        fl4->flowi4_tos = tos;
+       fl4->flowi4_oif = oif;
        if (saddr)
                fl4->saddr = saddr->a4;
 
@@ -38,22 +39,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        return ERR_CAST(rt);
 }
 
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
                                          const xfrm_address_t *saddr,
                                          const xfrm_address_t *daddr)
 {
        struct flowi4 fl4;
 
-       return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+       return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
 }
 
-static int xfrm4_get_saddr(struct net *net,
+static int xfrm4_get_saddr(struct net *net, int oif,
                           xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
        struct dst_entry *dst;
        struct flowi4 fl4;
 
-       dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+       dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
        if (IS_ERR(dst))
                return -EHOSTUNREACH;
 
index 643f61339e7b4fc9d4dcba75c4bb772c99d39292..983bb999738c4dbe1c6b084f5cc75da7d14b1543 100644 (file)
@@ -92,6 +92,25 @@ config IPV6_MIP6
 
          If unsure, say N.
 
+config IPV6_ILA
+       tristate "IPv6: Identifier Locator Addressing (ILA)"
+       select LWTUNNEL
+       ---help---
+         Support for IPv6 Identifier Locator Addressing (ILA).
+
+         ILA is a mechanism to do network virtualization without
+         encapsulation. The basic concept of ILA is that we split an
+         IPv6 address into a 64-bit locator and a 64-bit identifier. The
+         identifier is the identity of an entity in communication
+         ("who") and the locator expresses the location of the
+         entity ("where").
+
+         ILA can be configured using the "encap ila" option of the
+         "ip -6 route" command. ILA is described in
+         https://tools.ietf.org/html/draft-herbert-nvo3-ila-00.
+
+         If unsure, say N.
+
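A sketch of the address split the help text describes: translation rewrites only the locator half of the address, so state keyed on the identifier survives a move (function and argument names are illustrative, not part of the ILA code):

static void example_ila_set_locator(struct in6_addr *addr, __be64 locator)
{
        /* bytes 0..7: the locator ("where") is replaced */
        memcpy(&addr->s6_addr[0], &locator, sizeof(locator));
        /* bytes 8..15: the identifier ("who") stays stable */
}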
 config INET6_XFRM_TUNNEL
        tristate
        select INET6_TUNNEL
index 0f3f1999719ac72617b14e68c13f2662f66054a7..2c900c7b7eb1c45b1619a42275552861bdfcda44 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o
 obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
+obj-$(CONFIG_IPV6_ILA) += ila.o
 obj-$(CONFIG_NETFILTER)        += netfilter/
 
 obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
index 53e3a9d756b0d804e873c80a820383b756a0624b..0f08d3b9e23826914c057efa7db4e7e946883bd8 100644 (file)
@@ -214,6 +214,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
                .initialized = false,
        },
        .use_oif_addrs_only     = 0,
+       .ignore_routes_with_linkdown = 0,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -257,6 +258,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
                .initialized = false,
        },
        .use_oif_addrs_only     = 0,
+       .ignore_routes_with_linkdown = 0,
 };
 
 /* Check if a valid qdisc is available */
@@ -472,6 +474,9 @@ static int inet6_netconf_msgsize_devconf(int type)
        if (type == -1 || type == NETCONFA_PROXY_NEIGH)
                size += nla_total_size(4);
 
+       if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+               size += nla_total_size(4);
+
        return size;
 }
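
Each optional attribute reserves nla_total_size(4) bytes: a 4-byte attribute header plus the padded payload. A standalone sketch of that arithmetic, mirroring the NLA_ALIGN/NLA_HDRLEN definitions from include/uapi/linux/netlink.h:

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	NLA_ALIGN(4)	/* struct nlattr is 4 bytes */

/* header plus padded payload, as reserved per attribute above */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* four s32 attributes -> 4 * 8 = 32 bytes of attribute space */
	printf("%d\n", 4 * nla_total_size(4));
	return 0;
}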
 
@@ -508,6 +513,11 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
            nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
                goto nla_put_failure;
 
+       if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+           nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                       devconf->ignore_routes_with_linkdown) < 0)
+               goto nla_put_failure;
+
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -544,6 +554,7 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
        [NETCONFA_IFINDEX]      = { .len = sizeof(int) },
        [NETCONFA_FORWARDING]   = { .len = sizeof(int) },
        [NETCONFA_PROXY_NEIGH]  = { .len = sizeof(int) },
+       [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]  = { .len = sizeof(int) },
 };
 
 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@ -766,6 +777,63 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
                rt6_purge_dflt_routers(net);
        return 1;
 }
+
+static void addrconf_linkdown_change(struct net *net, __s32 newf)
+{
+       struct net_device *dev;
+       struct inet6_dev *idev;
+
+       for_each_netdev(net, dev) {
+               idev = __in6_dev_get(dev);
+               if (idev) {
+                       int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
+
+                       idev->cnf.ignore_routes_with_linkdown = newf;
+                       if (changed)
+                               inet6_netconf_notify_devconf(dev_net(dev),
+                                                            NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                                                            dev->ifindex,
+                                                            &idev->cnf);
+               }
+       }
+}
+
+static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
+{
+       struct net *net;
+       int old;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       net = (struct net *)table->extra2;
+       old = *p;
+       *p = newf;
+
+       if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
+               if ((!newf) ^ (!old))
+                       inet6_netconf_notify_devconf(net,
+                                                    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                                                    NETCONFA_IFINDEX_DEFAULT,
+                                                    net->ipv6.devconf_dflt);
+               rtnl_unlock();
+               return 0;
+       }
+
+       if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
+               net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+               addrconf_linkdown_change(net, newf);
+               if ((!newf) ^ (!old))
+                       inet6_netconf_notify_devconf(net,
+                                                    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+                                                    NETCONFA_IFINDEX_ALL,
+                                                    net->ipv6.devconf_all);
+       }
+       rtnl_unlock();
+
+       return 1;
+}
+
 #endif
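
The (!newf) ^ (!old) test used above is true exactly when the two values differ as booleans, whatever nonzero integers happen to represent them; a tiny sketch:

#include <stdio.h>

int main(void)
{
	int old = 3, newf = 7;

	/* both truthy -> no change reported */
	printf("changed=%d\n", (!old) ^ (!newf));	/* 0 */

	old = 0;
	/* one falsy, one truthy -> change reported */
	printf("changed=%d\n", (!old) ^ (!newf));	/* 1 */
	return 0;
}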
 
 /* Nobody refers to this ifaddr, destroy it */
@@ -3588,7 +3656,7 @@ static void addrconf_dad_work(struct work_struct *w)
 
        /* send a neighbour solicitation for our addr */
        addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-       ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+       ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 out:
        in6_ifa_put(ifp);
        rtnl_unlock();
@@ -4616,6 +4684,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
        array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
        array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
+       array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
        /* we omit DEVCONF_STABLE_SECRET for now */
        array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
 }
@@ -4637,6 +4706,7 @@ static inline size_t inet6_if_nlmsg_size(void)
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(4) /* IFLA_LINK */
+              + nla_total_size(1) /* IFLA_OPERSTATE */
               + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
 }
 
@@ -4893,7 +4963,9 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
            nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
            (dev->ifindex != dev_get_iflink(dev) &&
-            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
+            nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+           nla_put_u8(skb, IFLA_OPERSTATE,
+                      netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
                goto nla_put_failure;
        protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
        if (!protoinfo)
@@ -5338,6 +5410,34 @@ out:
        return err;
 }
 
+static
+int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+                                               int write,
+                                               void __user *buffer,
+                                               size_t *lenp,
+                                               loff_t *ppos)
+{
+       int *valp = ctl->data;
+       int val = *valp;
+       loff_t pos = *ppos;
+       struct ctl_table lctl;
+       int ret;
+
+       /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
+        * we should not modify it until we hold the rtnl lock.
+        */
+       lctl = *ctl;
+       lctl.data = &val;
+
+       ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
+
+       if (write)
+               ret = addrconf_fixup_linkdown(ctl, valp, val);
+       if (ret)
+               *ppos = pos;
+       return ret;
+}
+
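
The handler copies the ctl_table and points it at a stack variable, so proc_dointvec() parses user input without touching the shared field; the value is only committed once addrconf_fixup_linkdown() holds RTNL. The same copy-then-commit shape in a plain-pthread sketch (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_val;		/* stands in for the shared devconf field */

static int update(const char *input)
{
	int local;

	/* parse into a local, outside the lock */
	if (sscanf(input, "%d", &local) != 1)
		return -1;

	/* commit under the lock, as the fixup helper does under RTNL */
	pthread_mutex_lock(&lock);
	shared_val = local;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	update("1");
	printf("%d\n", shared_val);
	return 0;
}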
 static struct addrconf_sysctl_table
 {
        struct ctl_table_header *sysctl_header;
@@ -5629,7 +5729,13 @@ static struct addrconf_sysctl_table
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
-
+               },
+               {
+                       .procname       = "ignore_routes_with_linkdown",
+                       .data           = &ipv6_devconf.ignore_routes_with_linkdown,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = addrconf_sysctl_ignore_routes_with_linkdown,
                },
                {
                        /* sentinel */
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
new file mode 100644 (file)
index 0000000..678d2df
--- /dev/null
@@ -0,0 +1,229 @@
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+#include <net/lwtunnel.h>
+#include <net/protocol.h>
+#include <uapi/linux/ila.h>
+
+struct ila_params {
+       __be64 locator;
+       __be64 locator_match;
+       __wsum csum_diff;
+};
+
+static inline struct ila_params *ila_params_lwtunnel(
+       struct lwtunnel_state *lwstate)
+{
+       return (struct ila_params *)lwstate->data;
+}
+
+static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
+{
+       __be32 diff[] = {
+               ~from[0], ~from[1], to[0], to[1],
+       };
+
+       return csum_partial(diff, sizeof(diff), 0);
+}
+
+static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
+{
+       if (*(__be64 *)&ip6h->daddr == p->locator_match)
+               return p->csum_diff;
+       else
+               return compute_csum_diff8((__be32 *)&ip6h->daddr,
+                                         (__be32 *)&p->locator);
+}
+
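
compute_csum_diff8() builds the ones-complement difference between the old and new 8-byte locator so the transport checksum can be patched rather than recomputed over the whole packet. The underlying RFC 1624 update, sketched for a single 16-bit word with arbitrary values:

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit accumulator back into 16 ones-complement bits */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624: HC' = ~(~HC + ~m + m') */
static uint16_t csum_update(uint16_t check, uint16_t old_w, uint16_t new_w)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_w;
	sum += new_w;
	return ~fold(sum);
}

int main(void)
{
	/* arbitrary values; this combination prints 0x0000 */
	printf("0x%04x\n", csum_update(0xdd2f, 0x5555, 0x3285));
	return 0;
}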
+static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+       __wsum diff;
+       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       size_t nhoff = sizeof(struct ipv6hdr);
+
+       /* First update checksum */
+       switch (ip6h->nexthdr) {
+       case NEXTHDR_TCP:
+               if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
+                       struct tcphdr *th = (struct tcphdr *)
+                                       (skb_network_header(skb) + nhoff);
+
+                       diff = get_csum_diff(ip6h, p);
+                       inet_proto_csum_replace_by_diff(&th->check, skb,
+                                                       diff, true);
+               }
+               break;
+       case NEXTHDR_UDP:
+               if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
+                       struct udphdr *uh = (struct udphdr *)
+                                       (skb_network_header(skb) + nhoff);
+
+                       if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+                               diff = get_csum_diff(ip6h, p);
+                               inet_proto_csum_replace_by_diff(&uh->check, skb,
+                                                               diff, true);
+                               if (!uh->check)
+                                       uh->check = CSUM_MANGLED_0;
+                       }
+               }
+               break;
+       case NEXTHDR_ICMP:
+               if (likely(pskb_may_pull(skb,
+                                        nhoff + sizeof(struct icmp6hdr)))) {
+                       struct icmp6hdr *ih = (struct icmp6hdr *)
+                                       (skb_network_header(skb) + nhoff);
+
+                       diff = get_csum_diff(ip6h, p);
+                       inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+                                                       diff, true);
+               }
+               break;
+       }
+
+       /* Now change destination address */
+       *(__be64 *)&ip6h->daddr = p->locator;
+}
+
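
The UDP branch above preserves the rule that a transmitted UDP checksum of zero means "no checksum": when the patched sum comes out as 0 it is sent as CSUM_MANGLED_0 (0xffff), which folds to the same ones-complement value. A one-function sketch:

#include <stdint.h>
#include <stdio.h>

/* a computed 0 must go on the wire as 0xffff */
static uint16_t udp_wire_checksum(uint16_t computed)
{
	return computed ? computed : 0xffff;
}

int main(void)
{
	printf("0x%04x\n", udp_wire_checksum(0));	/* 0xffff */
	printf("0x%04x\n", udp_wire_checksum(0x1234));	/* 0x1234 */
	return 0;
}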
+static int ila_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (skb->protocol != htons(ETH_P_IPV6))
+               goto drop;
+
+       update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+       return dst->lwtstate->orig_output(sk, skb);
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static int ila_input(struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (skb->protocol != htons(ETH_P_IPV6))
+               goto drop;
+
+       update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+       return dst->lwtstate->orig_input(skb);
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+       [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+};
+
+static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+                          unsigned int family, const void *cfg,
+                          struct lwtunnel_state **ts)
+{
+       struct ila_params *p;
+       struct nlattr *tb[ILA_ATTR_MAX + 1];
+       size_t encap_len = sizeof(*p);
+       struct lwtunnel_state *newts;
+       const struct fib6_config *cfg6 = cfg;
+       int ret;
+
+       if (family != AF_INET6)
+               return -EINVAL;
+
+       ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
+                              ila_nl_policy);
+       if (ret < 0)
+               return ret;
+
+       if (!tb[ILA_ATTR_LOCATOR])
+               return -EINVAL;
+
+       newts = lwtunnel_state_alloc(encap_len);
+       if (!newts)
+               return -ENOMEM;
+
+       newts->len = encap_len;
+       p = ila_params_lwtunnel(newts);
+
+       p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+
+       if (cfg6->fc_dst_len > sizeof(__be64)) {
+               /* Precompute checksum difference for translation since we
+                * know both the old locator and the new one.
+                */
+               p->locator_match = *(__be64 *)&cfg6->fc_dst;
+               p->csum_diff = compute_csum_diff8(
+                       (__be32 *)&p->locator_match, (__be32 *)&p->locator);
+       }
+
+       newts->type = LWTUNNEL_ENCAP_ILA;
+       newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
+                       LWTUNNEL_STATE_INPUT_REDIRECT;
+
+       *ts = newts;
+
+       return 0;
+}
+
+static int ila_fill_encap_info(struct sk_buff *skb,
+                              struct lwtunnel_state *lwtstate)
+{
+       struct ila_params *p = ila_params_lwtunnel(lwtstate);
+
+       if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       /* No encapsulation overhead */
+       return 0;
+}
+
+static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       struct ila_params *a_p = ila_params_lwtunnel(a);
+       struct ila_params *b_p = ila_params_lwtunnel(b);
+
+       return (a_p->locator != b_p->locator);
+}
+
+static const struct lwtunnel_encap_ops ila_encap_ops = {
+       .build_state = ila_build_state,
+       .output = ila_output,
+       .input = ila_input,
+       .fill_encap = ila_fill_encap_info,
+       .get_encap_size = ila_encap_nlsize,
+       .cmp_encap = ila_encap_cmp,
+};
+
+static int __init ila_init(void)
+{
+       return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+static void __exit ila_fini(void)
+{
+       lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
index 5693b5eb84820fceb7feb2f87345cd38b2613c6e..418d9823692b6e78077d44c1ed8b15e998e2316b 100644 (file)
@@ -173,12 +173,13 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                        *ppcpu_rt = NULL;
                }
        }
+
+       non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
 static void rt6_release(struct rt6_info *rt)
 {
        if (atomic_dec_and_test(&rt->rt6i_ref)) {
-               lwtstate_put(rt->rt6i_lwtstate);
                rt6_free_pcpu(rt);
                dst_free(&rt->dst);
        }
index df8afe5ab31e4b8e75bf2fbf844f8b3e798edbba..9405b04eecc64f478960329da93f6e01d437954e 100644 (file)
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
        struct sk_buff *skb_chk = NULL;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ipv6_hdr(skb)->payload_len);
        transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ipv6_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ipv6_mc_check_mld_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free the now-unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * The caller needs to set the skb network header and free any returned skb
+ * if it differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
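
The reworked __ipv6_mc_check_mld() above routes every failure through a single err label that frees skb_chk only when it exists and is not the caller's skb. The same single-exit cleanup shape in a standalone sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int process(const char *src)
{
	int ret = -1;		/* assume failure until proven otherwise */
	char *buf = malloc(64);

	if (!buf)
		goto err;

	if (strlen(src) >= 64)	/* validation failure */
		goto err;

	strcpy(buf, src);
	printf("ok: %s\n", buf);
	ret = 0;		/* success also falls through the label */

err:
	free(buf);		/* free(NULL) is a no-op */
	return ret;
}

int main(void)
{
	return process("hello");
}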
index b3054611f88a5f69503e1a44ced1592579dfc4fd..13d3c2beb93ea3e6e7745858ac2ba0520b0d65ba 100644 (file)
@@ -553,7 +553,8 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                   const struct in6_addr *solicit,
-                  const struct in6_addr *daddr, const struct in6_addr *saddr)
+                  const struct in6_addr *daddr, const struct in6_addr *saddr,
+                  struct sk_buff *oskb)
 {
        struct sk_buff *skb;
        struct in6_addr addr_buf;
@@ -589,6 +590,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
                                       dev->dev_addr);
 
+       if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+               skb_dst_copy(skb, oskb);
+
        ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -675,12 +679,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
                                  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
                                  __func__, target);
                }
-               ndisc_send_ns(dev, neigh, target, target, saddr);
+               ndisc_send_ns(dev, neigh, target, target, saddr, skb);
        } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
                neigh_app_ns(neigh);
        } else {
                addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+               ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
        }
 }
 
index b552cf0d6198c50308461d0d83ac09bdd8c21f82..96833e4b31939a191eaf7de297ac438d4aa41fa4 100644 (file)
@@ -47,9 +47,21 @@ config NFT_REJECT_IPV6
        default NFT_REJECT
        tristate
 
+config NFT_DUP_IPV6
+       tristate "IPv6 nf_tables packet duplication support"
+       select NF_DUP_IPV6
+       help
+         This module enables IPv6 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV6
 endif # NF_TABLES
 
+config NF_DUP_IPV6
+       tristate "Netfilter IPv6 packet duplication to alternate destination"
+       help
+         This option enables the nf_dup_ipv6 core, which duplicates an
+         IPv6 packet so that it can be rerouted to another destination.
+
 config NF_REJECT_IPV6
        tristate "IPv6 packet rejection"
        default m if NETFILTER_ADVANCED=n
index c36e0a5490de10cd64f5c64571efa13628568199..b4f7d0b4e2afc630f7a5be2ae949dff676dc5985 100644 (file)
@@ -30,6 +30,8 @@ obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
 # reject
 obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
 
+obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
+
 # nf_tables
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
@@ -37,6 +39,7 @@ obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
 obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
 obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
 obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
+obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
index 6edb7b106de769728357174d0657c644f83e41e8..ebbb754c2111b73c87fe85aa58b3124e0a3032ef 100644 (file)
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
 
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+                 const struct sk_buff *skb, struct sk_buff *nskb,
                  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
                  struct ipv6hdr *niph, struct tcphdr *nth,
                  unsigned int tcp_hdr_size)
 {
-       struct net *net = nf_ct_net((struct nf_conn *)nfct);
+       struct net *net = nf_ct_net(snet->tmpl);
        struct dst_entry *dst;
        struct flowi6 fl6;
 
@@ -83,7 +84,8 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+                           const struct sk_buff *skb, const struct tcphdr *th,
                            const struct synproxy_options *opts)
 {
        struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+       synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
                          niph, nth, tcp_hdr_size);
 }
 
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                                          XT_SYNPROXY_OPT_SACK_PERM |
                                          XT_SYNPROXY_OPT_ECN);
 
-               synproxy_send_client_synack(skb, th, &opts);
+               synproxy_send_client_synack(snet, skb, th, &opts);
                return NF_DROP;
 
        } else if (th->ack && !(th->fin || th->rst || th->syn)) {
index 4ba0c34c627b0e88d3a06fda6532c83a3936315e..7302900c321aff58fcb7dc21794b50e04b1942d8 100644 (file)
@@ -251,7 +251,7 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        if (*len < 0 || (unsigned int) *len < sizeof(sin6))
                return -EINVAL;
 
-       h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+       h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
        if (!h) {
                pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
                         &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
index 90388d606483cbbd15e421b3e51f6d757cd05883..0e6fae103d33454f70fb5790b71d2529af969636 100644 (file)
@@ -150,7 +150,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
        struct nf_conntrack_tuple intuple, origtuple;
        const struct nf_conntrack_tuple_hash *h;
        const struct nf_conntrack_l4proto *inproto;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       struct nf_conntrack_zone tmp;
 
        NF_CT_ASSERT(skb->nfct == NULL);
 
@@ -177,7 +177,8 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
 
        *ctinfo = IP_CT_RELATED;
 
-       h = nf_conntrack_find_get(net, zone, &intuple);
+       h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
+                                 &intuple);
        if (!h) {
                pr_debug("icmpv6_error: no match\n");
                return -NF_ACCEPT;
index 267fb8d5876e169f27e0e9a595dc89a20cfbea4e..6d9c0b3d5b8c49d111cca7bd70b9bc5229f0a263 100644 (file)
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
                                                struct sk_buff *skb)
 {
-       u16 zone = NF_CT_DEFAULT_ZONE;
-
+       u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       if (skb->nfct)
-               zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+       if (skb->nfct) {
+               enum ip_conntrack_info ctinfo;
+               const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+               zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+       }
 #endif
        if (nf_bridge_in_prerouting(skb))
-               return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+               return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
 
        if (hooknum == NF_INET_PRE_ROUTING)
-               return IP6_DEFRAG_CONNTRACK_IN + zone;
+               return IP6_DEFRAG_CONNTRACK_IN + zone_id;
        else
-               return IP6_DEFRAG_CONNTRACK_OUT + zone;
-
+               return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
new file mode 100644 (file)
index 0000000..d8ab654
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+       const struct dst_entry *dst;
+
+       if (skb->dev != NULL)
+               return dev_net(skb->dev);
+       dst = skb_dst(skb);
+       if (dst != NULL && dst->dev != NULL)
+               return dev_net(dst->dev);
+#endif
+       return &init_net;
+}
+
+static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
+                             int oif)
+{
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct net *net = pick_net(skb);
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+
+       memset(&fl6, 0, sizeof(fl6));
+       if (oif != -1)
+               fl6.flowi6_oif = oif;
+
+       fl6.daddr = *gw;
+       fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+                        (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (dst->error) {
+               dst_release(dst);
+               return false;
+       }
+       skb_dst_drop(skb);
+       skb_dst_set(skb, dst);
+       skb->dev      = dst->dev;
+       skb->protocol = htons(ETH_P_IPV6);
+
+       return true;
+}
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+                const struct in6_addr *gw, int oif)
+{
+       if (this_cpu_read(nf_skb_duplicated))
+               return;
+       skb = pskb_copy(skb, GFP_ATOMIC);
+       if (skb == NULL)
+               return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       nf_conntrack_put(skb->nfct);
+       skb->nfct     = &nf_ct_untracked_get()->ct_general;
+       skb->nfctinfo = IP_CT_NEW;
+       nf_conntrack_get(skb->nfct);
+#endif
+       if (hooknum == NF_INET_PRE_ROUTING ||
+           hooknum == NF_INET_LOCAL_IN) {
+               struct ipv6hdr *iph = ipv6_hdr(skb);
+               --iph->hop_limit;
+       }
+       if (nf_dup_ipv6_route(skb, gw, oif)) {
+               __this_cpu_write(nf_skb_duplicated, true);
+               ip6_local_out(skb);
+               __this_cpu_write(nf_skb_duplicated, false);
+       } else {
+               kfree_skb(skb);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv6: IPv6 packet duplication");
+MODULE_LICENSE("GPL");
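
nf_skb_duplicated is the recursion guard here: the copy re-enters the stack via ip6_local_out() on the same CPU, and without the flag a duplicating rule would clone its own clones. A userspace analogue with a thread-local flag (illustrative; the kernel uses a per-cpu variable because the re-entry happens synchronously on one CPU):

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool duplicating;

static void deliver(const char *pkt);

/* duplicate once, then hand the copy to delivery */
static void duplicate(const char *pkt)
{
	if (duplicating)
		return;		/* already inside a duplication */

	duplicating = true;
	deliver(pkt);		/* re-enters duplicate() harmlessly */
	duplicating = false;
}

static void deliver(const char *pkt)
{
	duplicate(pkt);		/* the rule engine sees the packet again */
	printf("deliver %s\n", pkt);
}

int main(void)
{
	duplicate("copy");	/* prints "deliver copy" exactly once */
	return 0;
}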
index e76900e0aa925a26c226f733f9a44e396ea7cc7f..70fbaed49edbc5511d9327be5c9c0e003dc12e7d 100644 (file)
@@ -124,7 +124,7 @@ static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
                newip = &t->dst.u3.in6;
        }
        inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
-                                 newip->s6_addr32, 1);
+                                 newip->s6_addr32, true);
 }
 
 static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
@@ -155,7 +155,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
                }
        } else
                inet_proto_csum_replace2(check, skb,
-                                        htons(oldlen), htons(datalen), 1);
+                                        htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 2205e8eeeacfa2ff56980cbeb73b6d52c77089fe..57593b00c5b4327164b79567be619f0f7561f5a2 100644 (file)
@@ -73,7 +73,7 @@ icmpv6_manip_pkt(struct sk_buff *skb,
            hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
                inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
                                         hdr->icmp6_identifier,
-                                        tuple->src.u.icmp.id, 0);
+                                        tuple->src.u.icmp.id, false);
                hdr->icmp6_identifier = tuple->src.u.icmp.id;
        }
        return true;
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
new file mode 100644 (file)
index 0000000..0eaa4f6
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+struct nft_dup_ipv6 {
+       enum nft_registers      sreg_addr:8;
+       enum nft_registers      sreg_dev:8;
+};
+
+static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+                             struct nft_regs *regs,
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+       struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
+       int oif = regs->data[priv->sreg_dev];
+
+       nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+}
+
+static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+               return -EINVAL;
+
+       priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+       err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+               priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+               return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+       }
+       return 0;
+}
+
+static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+
+       if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+           nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv6_type;
+static const struct nft_expr_ops nft_dup_ipv6_ops = {
+       .type           = &nft_dup_ipv6_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv6)),
+       .eval           = nft_dup_ipv6_eval,
+       .init           = nft_dup_ipv6_init,
+       .dump           = nft_dup_ipv6_dump,
+};
+
+static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
+       [NFTA_DUP_SREG_ADDR]    = { .type = NLA_U32 },
+       [NFTA_DUP_SREG_DEV]     = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv6_type __read_mostly = {
+       .family         = NFPROTO_IPV6,
+       .name           = "dup",
+       .ops            = &nft_dup_ipv6_ops,
+       .policy         = nft_dup_ipv6_policy,
+       .maxattr        = NFTA_DUP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv6_module_init(void)
+{
+       return nft_register_expr(&nft_dup_ipv6_type);
+}
+
+static void __exit nft_dup_ipv6_module_exit(void)
+{
+       nft_unregister_expr(&nft_dup_ipv6_type);
+}
+
+module_init(nft_dup_ipv6_module_init);
+module_exit(nft_dup_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
index 54fccf0d705ddee83e3ba1e1b655fc58cd5c38a4..df3e353a012d081699df247482c9932a9f69ee9c 100644 (file)
 #include <net/tcp.h>
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/xfrm.h>
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
 #include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
 
 #include <asm/uaccess.h>
 
@@ -319,8 +321,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
-                                       int flags,
-                                       struct fib6_table *table)
+                                       int flags)
 {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -337,10 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 
 static struct rt6_info *ip6_dst_alloc(struct net *net,
                                      struct net_device *dev,
-                                     int flags,
-                                     struct fib6_table *table)
+                                     int flags)
 {
-       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -538,7 +538,7 @@ static void rt6_probe_deferred(struct work_struct *w)
                container_of(w, struct __rt6_probe_work, work);
 
        addrconf_addr_solict_mult(&work->target, &mcaddr);
-       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
        dev_put(work->dev);
        kfree(work);
 }
@@ -665,6 +665,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
 {
        int m;
        bool match_do_rr = false;
+       struct inet6_dev *idev = rt->rt6i_idev;
+       struct net_device *dev = rt->dst.dev;
+
+       if (dev && !netif_carrier_ok(dev) &&
+           idev->cnf.ignore_routes_with_linkdown)
+               goto out;
 
        if (rt6_check_expired(rt))
                goto out;
@@ -951,8 +957,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
 
-       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-                            0, ort->rt6i_table);
+       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
 
        if (!rt)
                return NULL;
@@ -984,8 +989,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
        struct rt6_info *pcpu_rt;
 
        pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-                                 rt->dst.dev, rt->dst.flags,
-                                 rt->rt6i_table);
+                                 rt->dst.dev, rt->dst.flags);
 
        if (!pcpu_rt)
                return NULL;
@@ -998,32 +1002,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 /* It should be called with read_lock_bh(&tb6_lock) acquired */
 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 {
-       struct rt6_info *pcpu_rt, *prev, **p;
+       struct rt6_info *pcpu_rt, **p;
 
        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;
 
-       if (pcpu_rt)
-               goto done;
+       if (pcpu_rt) {
+               dst_hold(&pcpu_rt->dst);
+               rt6_dst_from_metrics_check(pcpu_rt);
+       }
+       return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+       struct fib6_table *table = rt->rt6i_table;
+       struct rt6_info *pcpu_rt, *prev, **p;
 
        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);
 
-               pcpu_rt = net->ipv6.ip6_null_entry;
-               goto done;
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return net->ipv6.ip6_null_entry;
        }
 
-       prev = cmpxchg(p, NULL, pcpu_rt);
-       if (prev) {
-               /* If someone did it before us, return prev instead */
+       read_lock_bh(&table->tb6_lock);
+       if (rt->rt6i_pcpu) {
+               p = this_cpu_ptr(rt->rt6i_pcpu);
+               prev = cmpxchg(p, NULL, pcpu_rt);
+               if (prev) {
+                       /* If someone did it before us, return prev instead */
+                       dst_destroy(&pcpu_rt->dst);
+                       pcpu_rt = prev;
+               }
+       } else {
+               /* rt has been removed from the fib6 tree
+                * before we have a chance to acquire the read_lock.
+                * In this case, don't bother to create a pcpu rt
+                * since rt is going away anyway.  The next
+                * dst_check() will trigger a re-lookup.
+                */
                dst_destroy(&pcpu_rt->dst);
-               pcpu_rt = prev;
+               pcpu_rt = rt;
        }
-
-done:
        dst_hold(&pcpu_rt->dst);
        rt6_dst_from_metrics_check(pcpu_rt);
+       read_unlock_bh(&table->tb6_lock);
        return pcpu_rt;
 }
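
rt6_make_pcpu_route() publishes its allocation with cmpxchg() and, if another CPU installed one first, destroys its own copy and uses the winner's. The install-once pattern in a standalone C11 atomics sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) slot;

static int *get_or_create(void)
{
	int *cur = atomic_load(&slot);
	if (cur)
		return cur;	/* fast path: already installed */

	int *mine = malloc(sizeof(*mine));
	if (!mine)
		return NULL;
	*mine = 42;

	int *expected = NULL;
	if (!atomic_compare_exchange_strong(&slot, &expected, mine)) {
		free(mine);	/* someone did it before us */
		return expected;	/* the winner's pointer */
	}
	return mine;
}

int main(void)
{
	printf("%d\n", *get_or_create());
	return 0;
}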
 
@@ -1098,9 +1123,22 @@ redo_rt6_select:
                rt->dst.lastuse = jiffies;
                rt->dst.__use++;
                pcpu_rt = rt6_get_pcpu_route(rt);
-               read_unlock_bh(&table->tb6_lock);
+
+               if (pcpu_rt) {
+                       read_unlock_bh(&table->tb6_lock);
+               } else {
+                       /* We have to do the read_unlock first
+                        * because rt6_make_pcpu_route() may trigger
+                        * ip6_dst_gc() which will take the write_lock.
+                        */
+                       dst_hold(&rt->dst);
+                       read_unlock_bh(&table->tb6_lock);
+                       pcpu_rt = rt6_make_pcpu_route(rt);
+                       dst_release(&rt->dst);
+               }
 
                return pcpu_rt;
+
        }
 }
 
@@ -1125,6 +1163,7 @@ void ip6_route_input(struct sk_buff *skb)
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
+       struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
@@ -1134,6 +1173,10 @@ void ip6_route_input(struct sk_buff *skb)
                .flowi6_proto = iph->nexthdr,
        };
 
+       tun_info = skb_tunnel_info(skb);
+       if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+               fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+       skb_dst_drop(skb);
        skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
 
@@ -1556,7 +1599,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        if (unlikely(!idev))
                return ERR_PTR(-ENODEV);
 
-       rt = ip6_dst_alloc(net, dev, 0, NULL);
+       rt = ip6_dst_alloc(net, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
                dst = ERR_PTR(-ENOMEM);
@@ -1743,7 +1786,8 @@ int ip6_route_add(struct fib6_config *cfg)
        if (!table)
                goto out;
 
-       rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+       rt = ip6_dst_alloc(net, NULL,
+                          (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
 
        if (!rt) {
                err = -ENOMEM;
@@ -1775,12 +1819,19 @@ int ip6_route_add(struct fib6_config *cfg)
                struct lwtunnel_state *lwtstate;
 
                err = lwtunnel_build_state(dev, cfg->fc_encap_type,
-                                          cfg->fc_encap, &lwtstate);
+                                          cfg->fc_encap, AF_INET6, cfg,
+                                          &lwtstate);
                if (err)
                        goto out;
-               rt->rt6i_lwtstate = lwtstate_get(lwtstate);
-               if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
-                       rt->dst.output = lwtunnel_output6;
+               rt->dst.lwtstate = lwtstate_get(lwtstate);
+               if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+                       rt->dst.lwtstate->orig_output = rt->dst.output;
+                       rt->dst.output = lwtunnel_output;
+               }
+               if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+                       rt->dst.lwtstate->orig_input = rt->dst.input;
+                       rt->dst.input = lwtunnel_input;
+               }
        }
 
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
@@ -1844,6 +1895,7 @@ int ip6_route_add(struct fib6_config *cfg)
                int gwa_type;
 
                gw_addr = &cfg->fc_gateway;
+               gwa_type = ipv6_addr_type(gw_addr);
 
                /* if gw_addr is local we will fail to detect this in case
                 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1851,11 +1903,12 @@ int ip6_route_add(struct fib6_config *cfg)
                 * prefix route was assigned to, which might be non-loopback.
                 */
                err = -EINVAL;
-               if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+               if (ipv6_chk_addr_and_flags(net, gw_addr,
+                                           gwa_type & IPV6_ADDR_LINKLOCAL ?
+                                           dev : NULL, 0, 0))
                        goto out;
 
                rt->rt6i_gateway = *gw_addr;
-               gwa_type = ipv6_addr_type(gw_addr);
 
                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
                        struct rt6_info *grt;
@@ -2160,7 +2213,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 #endif
        rt->rt6i_prefsrc = ort->rt6i_prefsrc;
        rt->rt6i_table = ort->rt6i_table;
-       rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
+       rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2411,7 +2464,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-                                           DST_NOCOUNT, NULL);
+                                           DST_NOCOUNT);
        if (!rt)
                return ERR_PTR(-ENOMEM);
 
@@ -2824,7 +2877,7 @@ static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
               + nla_total_size(sizeof(struct rta_cacheinfo))
               + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
               + nla_total_size(1) /* RTA_PREF */
-              + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
+              + lwtunnel_get_encap_size(rt->dst.lwtstate);
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2885,6 +2938,11 @@ static int rt6_fill_node(struct net *net,
        else
                rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
+       if (!netif_carrier_ok(rt->dst.dev)) {
+               rtm->rtm_flags |= RTNH_F_LINKDOWN;
+               if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
+                       rtm->rtm_flags |= RTNH_F_DEAD;
+       }
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
        if (rt->rt6i_flags & RTF_DYNAMIC)
@@ -2972,7 +3030,7 @@ static int rt6_fill_node(struct net *net,
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
 
-       lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+       lwtunnel_fill_encap(skb, rt->dst.lwtstate);
 
        nlmsg_end(skb, nlh);
        return 0;
index ac35a28599be557cac1114cad35e3c47c41436c1..94428fd85b2f9543c0e3015f785306ce203e73dc 100644 (file)
@@ -742,7 +742,7 @@ static int ipip_rcv(struct sk_buff *skb)
                        goto drop;
                if (iptunnel_pull_header(skb, 0, tpi.proto))
                        goto drop;
-               return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+               return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
        }
 
        return 1;
index 52dd0d9974d6c8dbaa4961434211eda2f55b6482..97d9314ea3611eeadd576d2fd8919ae60d468891 100644 (file)
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
                                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk)
+               if (!nsk || nsk == sk)
                        reqsk_put(req);
                return nsk;
        }
index e51fc3eee6dbd65506e8612fc5782b9482cf4708..0aba654f5b91c198cce365a5dd619e30b566e798 100644 (file)
@@ -1496,7 +1496,8 @@ int __net_init udp6_proc_init(struct net *net)
        return udp_proc_register(net, &udp6_seq_afinfo);
 }
 
-void udp6_proc_exit(struct net *net) {
+void udp6_proc_exit(struct net *net)
+{
        udp_proc_unregister(net, &udp6_seq_afinfo);
 }
 #endif /* CONFIG_PROC_FS */
index 901ef6f8addc0cf730909d2656513372cd6cf80f..f7fbdbabe50efbc91ea5ed811bdf24b33172cd4b 100644 (file)
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-       const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
-       if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+       if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
                IP6_ECN_set_ce(inner_iph);
 }
 
index ed0583c1b9fc2e0033912e2d4c7177a12f17e8b7..a74013d3eceb448e07d39bc0e2dc5dd03dc401bd 100644 (file)
@@ -26,7 +26,7 @@
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
 
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
                                          const xfrm_address_t *saddr,
                                          const xfrm_address_t *daddr)
 {
@@ -35,6 +35,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
        int err;
 
        memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_oif = oif;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
                memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -50,13 +51,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
        return dst;
 }
 
-static int xfrm6_get_saddr(struct net *net,
+static int xfrm6_get_saddr(struct net *net, int oif,
                           xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
        struct dst_entry *dst;
        struct net_device *dev;
 
-       dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+       dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
        if (IS_ERR(dst))
                return -EHOSTUNREACH;
 
index 086de496a4c197bb98a5e0b550a9b67d516d4902..3891cbd2adeab7bdcb062be3dfb36926cf91d23c 100644 (file)
@@ -7,7 +7,6 @@ config MAC80211
        select CRYPTO_CCM
        select CRYPTO_GCM
        select CRC32
-       select AVERAGE
        ---help---
          This option enables the hardware independent IEEE 802.11
          networking stack.
index 3275f01881bee8a53a046e117873347fe04877c8..783e891b7525c537bcab70b1be7c603e5f6d1f0d 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_MAC80211) += mac80211.o
 # mac80211 objects
 mac80211-y := \
        main.o status.o \
+       driver-ops.o \
        sta_info.o \
        wep.o \
        wpa.o \
index 4192806be3d36884d22ce5830a43cae63d54745d..bdf0790d89cca6fe3f64c097c84299ac35e1f3d0 100644 (file)
@@ -145,20 +145,3 @@ void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
 {
        crypto_free_cipher(tfm);
 }
-
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-                                       u8 *k1, u8 *k2)
-{
-       u8 l[AES_BLOCK_SIZE] = {};
-       struct ieee80211_key *key =
-               container_of(keyconf, struct ieee80211_key, conf);
-
-       crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
-
-       memcpy(k1, l, AES_BLOCK_SIZE);
-       gf_mulx(k1);
-
-       memcpy(k2, k1, AES_BLOCK_SIZE);
-       gf_mulx(k2);
-}
-EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
index bf7023f6c3278289f1100f7ce6fa4f56d72caa15..685ec13ed7c2b0a2dcdcf82d7388d1c44d041a26 100644 (file)
@@ -1019,6 +1019,65 @@ static int sta_apply_auth_flags(struct ieee80211_local *local,
        return 0;
 }
 
+static void sta_apply_mesh_params(struct ieee80211_local *local,
+                                 struct sta_info *sta,
+                                 struct station_parameters *params)
+{
+#ifdef CONFIG_MAC80211_MESH
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed = 0;
+
+       if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
+               switch (params->plink_state) {
+               case NL80211_PLINK_ESTAB:
+                       if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+                               changed = mesh_plink_inc_estab_count(sdata);
+                       sta->mesh->plink_state = params->plink_state;
+
+                       ieee80211_mps_sta_status_update(sta);
+                       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                     sdata->u.mesh.mshcfg.power_mode);
+                       break;
+               case NL80211_PLINK_LISTEN:
+               case NL80211_PLINK_BLOCKED:
+               case NL80211_PLINK_OPN_SNT:
+               case NL80211_PLINK_OPN_RCVD:
+               case NL80211_PLINK_CNF_RCVD:
+               case NL80211_PLINK_HOLDING:
+                       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+                               changed = mesh_plink_dec_estab_count(sdata);
+                       sta->mesh->plink_state = params->plink_state;
+
+                       ieee80211_mps_sta_status_update(sta);
+                       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                       NL80211_MESH_POWER_UNKNOWN);
+                       break;
+               default:
+                       /*  nothing  */
+                       break;
+               }
+       }
+
+       switch (params->plink_action) {
+       case NL80211_PLINK_ACTION_NO_ACTION:
+               /* nothing */
+               break;
+       case NL80211_PLINK_ACTION_OPEN:
+               changed |= mesh_plink_open(sta);
+               break;
+       case NL80211_PLINK_ACTION_BLOCK:
+               changed |= mesh_plink_block(sta);
+               break;
+       }
+
+       if (params->local_pm)
+               changed |= ieee80211_mps_set_sta_local_pm(sta,
+                                                         params->local_pm);
+
+       ieee80211_mbss_info_change_notify(sdata, changed);
+#endif
+}
+
 static int sta_apply_parameters(struct ieee80211_local *local,
                                struct sta_info *sta,
                                struct station_parameters *params)
@@ -1076,7 +1135,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
        }
 
        if (mask & BIT(NL80211_STA_FLAG_MFP)) {
-               sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
                if (set & BIT(NL80211_STA_FLAG_MFP))
                        set_sta_flag(sta, WLAN_STA_MFP);
                else
@@ -1097,6 +1155,12 @@ static int sta_apply_parameters(struct ieee80211_local *local,
            params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
                set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
 
+       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+           ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+           params->ext_capab_len >= 8 &&
+           params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
+               set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW);
+
        if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
                sta->sta.uapsd_queues = params->uapsd_queues;
                sta->sta.max_sp = params->max_sp;
@@ -1144,62 +1208,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                                              band, false);
        }
 
-       if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
-               u32 changed = 0;
-
-               if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
-                       switch (params->plink_state) {
-                       case NL80211_PLINK_ESTAB:
-                               if (sta->plink_state != NL80211_PLINK_ESTAB)
-                                       changed = mesh_plink_inc_estab_count(
-                                                       sdata);
-                               sta->plink_state = params->plink_state;
-
-                               ieee80211_mps_sta_status_update(sta);
-                               changed |= ieee80211_mps_set_sta_local_pm(sta,
-                                             sdata->u.mesh.mshcfg.power_mode);
-                               break;
-                       case NL80211_PLINK_LISTEN:
-                       case NL80211_PLINK_BLOCKED:
-                       case NL80211_PLINK_OPN_SNT:
-                       case NL80211_PLINK_OPN_RCVD:
-                       case NL80211_PLINK_CNF_RCVD:
-                       case NL80211_PLINK_HOLDING:
-                               if (sta->plink_state == NL80211_PLINK_ESTAB)
-                                       changed = mesh_plink_dec_estab_count(
-                                                       sdata);
-                               sta->plink_state = params->plink_state;
-
-                               ieee80211_mps_sta_status_update(sta);
-                               changed |= ieee80211_mps_set_sta_local_pm(sta,
-                                               NL80211_MESH_POWER_UNKNOWN);
-                               break;
-                       default:
-                               /*  nothing  */
-                               break;
-                       }
-               }
-
-               switch (params->plink_action) {
-               case NL80211_PLINK_ACTION_NO_ACTION:
-                       /* nothing */
-                       break;
-               case NL80211_PLINK_ACTION_OPEN:
-                       changed |= mesh_plink_open(sta);
-                       break;
-               case NL80211_PLINK_ACTION_BLOCK:
-                       changed |= mesh_plink_block(sta);
-                       break;
-               }
-
-               if (params->local_pm)
-                       changed |=
-                             ieee80211_mps_set_sta_local_pm(sta,
-                                                            params->local_pm);
-               ieee80211_mbss_info_change_notify(sdata, changed);
-#endif
-       }
+       if (ieee80211_vif_is_mesh(&sdata->vif))
+               sta_apply_mesh_params(local, sta, params);
 
        /* set the STA state after all sta info from usermode has been set */
        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
@@ -2358,6 +2368,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
        const u8 *ap;
        enum ieee80211_smps_mode old_req;
        int err;
+       struct sta_info *sta;
+       bool tdls_peer_found = false;
 
        lockdep_assert_held(&sdata->wdev.mtx);
 
@@ -2382,11 +2394,22 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
 
        ap = sdata->u.mgd.associated->bssid;
 
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       continue;
+
+               tdls_peer_found = true;
+               break;
+       }
+       rcu_read_unlock();
+
        if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
-               if (sdata->u.mgd.powersave)
-                       smps_mode = IEEE80211_SMPS_DYNAMIC;
-               else
+               if (tdls_peer_found || !sdata->u.mgd.powersave)
                        smps_mode = IEEE80211_SMPS_OFF;
+               else
+                       smps_mode = IEEE80211_SMPS_DYNAMIC;
        }
 
        /* send SM PS frame to AP */
@@ -2394,6 +2417,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
                                         ap, ap);
        if (err)
                sdata->u.mgd.req_smps = old_req;
+       else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
+               ieee80211_teardown_tdls_peers(sdata);
 
        return err;
 }
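
For reference, the AUTOMATIC branch above now resolves as follows: an authorized TDLS peer forces SM PS off, since dynamic SMPS would disturb the direct link. A minimal standalone sketch of that rule (not the kernel code; names are illustrative):

    enum smps_mode { SMPS_AUTOMATIC, SMPS_OFF, SMPS_DYNAMIC };

    static enum smps_mode resolve_smps(enum smps_mode requested,
                                       int powersave, int tdls_peer_found)
    {
            if (requested != SMPS_AUTOMATIC)
                    return requested;
            /* TDLS peer present, or powersave disabled: keep all chains on */
            if (tdls_peer_found || !powersave)
                    return SMPS_OFF;
            return SMPS_DYNAMIC;
    }
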
@@ -2479,16 +2504,26 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
                sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
                memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs,
                       sizeof(mask->control[i].ht_mcs));
+               memcpy(sdata->rc_rateidx_vht_mcs_mask[i],
+                      mask->control[i].vht_mcs,
+                      sizeof(mask->control[i].vht_mcs));
 
                sdata->rc_has_mcs_mask[i] = false;
+               sdata->rc_has_vht_mcs_mask[i] = false;
                if (!sband)
                        continue;
 
-               for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++)
-                       if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+               for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
+                       if (~sdata->rc_rateidx_mcs_mask[i][j])
                                sdata->rc_has_mcs_mask[i] = true;
+
+                       if (~sdata->rc_rateidx_vht_mcs_mask[i][j])
+                               sdata->rc_has_vht_mcs_mask[i] = true;
+
+                       if (sdata->rc_has_mcs_mask[i] &&
+                           sdata->rc_has_vht_mcs_mask[i])
                                break;
-                       }
+               }
        }
 
        return 0;
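
The detection loop above marks a band as having a user-supplied mask whenever any entry differs from the all-ones default. A userspace sketch of the same idea, written with an explicit != 0xff comparison rather than the ~mask idiom (the array length is assumed to mirror IEEE80211_HT_MCS_MASK_LEN):

    #include <stdbool.h>
    #include <stdint.h>

    #define HT_MCS_MASK_LEN 10   /* assumed, mirrors IEEE80211_HT_MCS_MASK_LEN */

    /* True if the user restricted at least one HT MCS,
     * i.e. the mask is not all-ones. */
    static bool has_user_mcs_mask(const uint8_t mask[HT_MCS_MASK_LEN])
    {
            for (int j = 0; j < HT_MCS_MASK_LEN; j++)
                    if (mask[j] != 0xff)
                            return true;
            return false;
    }
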
index f01c18a3160e11d72dae9e2a0939530ec805f6a5..1d1b9b7bdefe74ac851ca6d01554d663fae39541 100644 (file)
@@ -190,7 +190,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
        return NULL;
 }
 
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
 {
        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_20:
@@ -264,9 +264,17 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
                case NL80211_IFTYPE_AP_VLAN:
                        width = ieee80211_get_max_required_bw(sdata);
                        break;
+               case NL80211_IFTYPE_STATION:
+                       /*
+                        * The ap's sta->bandwidth is not set yet at this
+                        * point, so take the width from the chandef, but
+                        * account also for TDLS peers
+                        */
+                       width = max(vif->bss_conf.chandef.width,
+                                   ieee80211_get_max_required_bw(sdata));
+                       break;
                case NL80211_IFTYPE_P2P_DEVICE:
                        continue;
-               case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_MESH_POINT:
@@ -554,12 +562,13 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
        kfree_rcu(ctx, rcu_head);
 }
 
-static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
-                                             struct ieee80211_chanctx *ctx)
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+                                      struct ieee80211_chanctx *ctx)
 {
        struct ieee80211_chanctx_conf *conf = &ctx->conf;
        struct ieee80211_sub_if_data *sdata;
        const struct cfg80211_chan_def *compat = NULL;
+       struct sta_info *sta;
 
        lockdep_assert_held(&local->chanctx_mtx);
 
@@ -581,6 +590,20 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                if (WARN_ON_ONCE(!compat))
                        break;
        }
+
+       /* TDLS peers can sometimes affect the chandef width */
+       list_for_each_entry_rcu(sta, &local->sta_list, list) {
+               if (!sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+                   !sta->tdls_chandef.chan)
+                       continue;
+
+               compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
+                                                    compat);
+               if (WARN_ON_ONCE(!compat))
+                       break;
+       }
        rcu_read_unlock();
 
        if (!compat)
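
Taken together, the two chan.c hunks make the channel-context width a fold over the BSS chandef and every authorized wider-bandwidth TDLS peer. A toy model of that fold, comparing bare widths only (the real code compares full chandefs via cfg80211_chandef_compatible()):

    #include <stddef.h>

    enum width { W20 = 20, W40 = 40, W80 = 80, W160 = 160 };

    /* The context width must cover the BSS plus every authorized
     * wider-bandwidth TDLS peer. */
    static enum width required_width(enum width bss,
                                     const enum width *tdls, size_t n)
    {
            enum width w = bss;

            for (size_t i = 0; i < n; i++)
                    if (tdls[i] > w)
                            w = tdls[i];
            return w;
    }
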
index 3ea8b7de963368faf6effceec8ad0e73c8d7fb54..ced6bf3be8d6cf5d3d9fc80b6c46f48c4e567aef 100644 (file)
@@ -122,6 +122,7 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
        FLAG(CHANCTX_STA_CSA),
        FLAG(SUPPORTS_CLONED_SKBS),
        FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+       FLAG(TDLS_WIDER_BW),
 
        /* keep last for the build bug below */
        (void *)0x1
@@ -277,7 +278,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_STATS_ADD(rx_handlers_queued);
        DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
        DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
-       DEBUGFS_STATS_ADD(rx_handlers_drop_short);
        DEBUGFS_STATS_ADD(tx_expand_skb_head);
        DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
        DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
index e82bf1e9d7a83e32566bf9d96f8f461bd4f141ab..702ca122c498938691842d95db7732d6d8c6d6bb 100644 (file)
@@ -57,7 +57,6 @@ KEY_CONF_FILE(keylen, D);
 KEY_CONF_FILE(keyidx, D);
 KEY_CONF_FILE(hw_key_idx, D);
 KEY_FILE(flags, X);
-KEY_FILE(tx_rx_count, D);
 KEY_READ(ifindex, sdata->name, "%s\n");
 KEY_OPS(ifindex);
 
@@ -310,7 +309,6 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
        DEBUGFS_ADD(flags);
        DEBUGFS_ADD(keyidx);
        DEBUGFS_ADD(hw_key_idx);
-       DEBUGFS_ADD(tx_rx_count);
        DEBUGFS_ADD(algorithm);
        DEBUGFS_ADD(tx_spec);
        DEBUGFS_ADD(rx_spec);
index c09c0131bfa227e99346b180f501cc6fcf64644a..1021e87c051f35168eef1274a38a4d720054359e 100644 (file)
@@ -186,6 +186,38 @@ IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
                  rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
 
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
+                               const struct ieee80211_sub_if_data *sdata,
+                               char *buf, int buflen)
+{
+       int i, len = 0;
+       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+               len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+       len += scnprintf(buf + len, buflen - len, "\n");
+
+       return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
+
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
+                               const struct ieee80211_sub_if_data *sdata,
+                               char *buf, int buflen)
+{
+       int i, len = 0;
+       const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+               len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+       len += scnprintf(buf + len, buflen - len, "\n");
+
+       return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
+
 IEEE80211_IF_FILE(flags, flags, HEX);
 IEEE80211_IF_FILE(state, state, LHEX);
 IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
@@ -565,6 +597,8 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(rc_rateidx_mask_5ghz);
        DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
        DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+       DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
+       DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
        DEBUGFS_ADD(hw_queues);
 }
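
Each of the new debugfs files prints one four-hex-digit mask per spatial stream, as the scnprintf loop above shows. For a device allowing MCS 0-9 on two streams, reading rc_rateidx_vht_mcs_mask_5ghz would produce something like:

    03ff 03ff 0000 0000 0000 0000 0000 0000
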
 
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
new file mode 100644 (file)
index 0000000..267c3b1
--- /dev/null
+++ b/net/mac80211/driver-ops.c
@@ -0,0 +1,41 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "trace.h"
+#include "driver-ops.h"
+
+__must_check
+int drv_sta_state(struct ieee80211_local *local,
+                 struct ieee80211_sub_if_data *sdata,
+                 struct sta_info *sta,
+                 enum ieee80211_sta_state old_state,
+                 enum ieee80211_sta_state new_state)
+{
+       int ret = 0;
+
+       might_sleep();
+
+       sdata = get_bss_sdata(sdata);
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
+
+       trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
+       if (local->ops->sta_state) {
+               ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+                                           old_state, new_state);
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_ASSOC) {
+               ret = drv_sta_add(local, sdata, &sta->sta);
+               if (ret == 0)
+                       sta->uploaded = true;
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTH) {
+               drv_sta_remove(local, sdata, &sta->sta);
+       }
+       trace_drv_return_int(local, ret);
+       return ret;
+}
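
The function, now out of line, keeps the old fallback: a driver without a sta_state callback only learns about stations at the AUTH<->ASSOC boundary, via sta_add()/sta_remove(). A standalone sketch of that dispatch (stub driver hooks, illustrative names):

    enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

    static int  driver_sta_add(void)    { return 0; }   /* stub */
    static void driver_sta_remove(void) { }             /* stub */

    static int fallback_sta_state(enum sta_state old, enum sta_state new)
    {
            if (old == STA_AUTH && new == STA_ASSOC)
                    return driver_sta_add();    /* station becomes visible */
            if (old == STA_ASSOC && new == STA_AUTH)
                    driver_sta_remove();        /* station goes away */
            return 0;                           /* other transitions: no-op */
    }
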
index 32a2e707e2226355235907c956632d6f03aa5af5..02d91332d7dddbe4459246308d605146be4430d7 100644 (file)
@@ -573,37 +573,12 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
        trace_drv_return_void(local);
 }
 
-static inline __must_check
+__must_check
 int drv_sta_state(struct ieee80211_local *local,
                  struct ieee80211_sub_if_data *sdata,
                  struct sta_info *sta,
                  enum ieee80211_sta_state old_state,
-                 enum ieee80211_sta_state new_state)
-{
-       int ret = 0;
-
-       might_sleep();
-
-       sdata = get_bss_sdata(sdata);
-       if (!check_sdata_in_driver(sdata))
-               return -EIO;
-
-       trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
-       if (local->ops->sta_state) {
-               ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
-                                           old_state, new_state);
-       } else if (old_state == IEEE80211_STA_AUTH &&
-                  new_state == IEEE80211_STA_ASSOC) {
-               ret = drv_sta_add(local, sdata, &sta->sta);
-               if (ret == 0)
-                       sta->uploaded = true;
-       } else if (old_state == IEEE80211_STA_ASSOC &&
-                  new_state == IEEE80211_STA_AUTH) {
-               drv_sta_remove(local, sdata, &sta->sta);
-       }
-       trace_drv_return_int(local, ret);
-       return ret;
-}
+                 enum ieee80211_sta_state new_state);
 
 static inline void drv_sta_rc_update(struct ieee80211_local *local,
                                     struct ieee80211_sub_if_data *sdata,
index b12f61507f9f9a4f84eaad69f98ac209af90361e..6e52659f923f72a6ab451e73b75338e4f4e7de13 100644 (file)
@@ -84,13 +84,13 @@ struct ieee80211_local;
 #define IEEE80211_DEAUTH_FRAME_LEN     (24 /* hdr */ + 2 /* reason */)
 
 struct ieee80211_fragment_entry {
-       unsigned long first_frag_time;
-       unsigned int seq;
-       unsigned int rx_queue;
-       unsigned int last_frag;
-       unsigned int extra_len;
        struct sk_buff_head skb_list;
-       int ccmp; /* Whether fragments were encrypted with CCMP */
+       unsigned long first_frag_time;
+       u16 seq;
+       u16 extra_len;
+       u16 last_frag;
+       u8 rx_queue;
+       bool ccmp; /* Whether fragments were encrypted with CCMP */
        u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
 };
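
Each narrowed field above still covers its value range: an 802.11 sequence number is 12 bits, a fragment number 4 bits, and RX queue counts are small. A userspace rendering of the new layout, with those ranges noted (field widths assumed sufficient as in the patch):

    #include <stdbool.h>
    #include <stdint.h>

    struct frag_entry_meta {
            unsigned long first_frag_time;
            uint16_t seq;        /* 802.11 sequence number: 12 bits */
            uint16_t extra_len;
            uint16_t last_frag;  /* fragment number: 4 bits */
            uint8_t  rx_queue;
            bool     ccmp;       /* wide-to-narrow ordering limits padding */
    };
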
 
@@ -181,7 +181,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 
 /**
  * enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_FRAGMENTED: fragmented frame
  * @IEEE80211_RX_AMSDU: a-MSDU packet
  * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
  * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
@@ -190,7 +189,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
  * @rx_flags field of &struct ieee80211_rx_status.
  */
 enum ieee80211_packet_rx_flags {
-       IEEE80211_RX_FRAGMENTED                 = BIT(2),
        IEEE80211_RX_AMSDU                      = BIT(3),
        IEEE80211_RX_MALFORMED_ACTION_FRM       = BIT(4),
        IEEE80211_RX_DEFERRED_RELEASE           = BIT(5),
@@ -202,8 +200,6 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *     to cfg80211_report_obss_beacon().
- * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
- *     reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -211,10 +207,10 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
        IEEE80211_RX_CMNTR              = BIT(0),
        IEEE80211_RX_BEACON_REPORTED    = BIT(1),
-       IEEE80211_RX_REORDER_TIMER      = BIT(2),
 };
 
 struct ieee80211_rx_data {
+       struct napi_struct *napi;
        struct sk_buff *skb;
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
@@ -725,6 +721,7 @@ struct ieee80211_if_mesh {
  *     back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
  * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
  */
 enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_ALLMULTI                = BIT(0),
@@ -732,6 +729,7 @@ enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_DONT_BRIDGE_PACKETS     = BIT(3),
        IEEE80211_SDATA_DISCONNECT_RESUME       = BIT(4),
        IEEE80211_SDATA_IN_DRIVER               = BIT(5),
+       IEEE80211_SDATA_MU_MIMO_OWNER           = BIT(6),
 };
 
 /**
@@ -903,6 +901,9 @@ struct ieee80211_sub_if_data {
        bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
        u8  rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
 
+       bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
+       u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+
        union {
                struct ieee80211_if_ap ap;
                struct ieee80211_if_wds wds;
@@ -1010,7 +1011,6 @@ enum sdata_queue_type {
        IEEE80211_SDATA_QUEUE_AGG_STOP          = 2,
        IEEE80211_SDATA_QUEUE_RX_AGG_START      = 3,
        IEEE80211_SDATA_QUEUE_RX_AGG_STOP       = 4,
-       IEEE80211_SDATA_QUEUE_TDLS_CHSW         = 5,
 };
 
 enum {
@@ -1286,7 +1286,6 @@ struct ieee80211_local {
        unsigned int rx_handlers_queued;
        unsigned int rx_handlers_drop_nullfunc;
        unsigned int rx_handlers_drop_defrag;
-       unsigned int rx_handlers_drop_short;
        unsigned int tx_expand_skb_head;
        unsigned int tx_expand_skb_head_cloned;
        unsigned int rx_expand_skb_head_defrag;
@@ -1348,14 +1347,16 @@ struct ieee80211_local {
 
        struct ieee80211_sub_if_data __rcu *p2p_sdata;
 
-       struct napi_struct *napi;
-
        /* virtual monitor interface */
        struct ieee80211_sub_if_data __rcu *monitor_sdata;
        struct cfg80211_chan_def monitor_chandef;
 
        /* extended capabilities provided by mac80211 */
        u8 ext_capa[8];
+
+       /* TDLS channel switch */
+       struct work_struct tdls_chsw_work;
+       struct sk_buff_head skb_queue_tdls_chsw;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1715,6 +1716,8 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 enum ieee80211_band band, bool nss_only);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
                                      struct ieee80211_sta_vht_cap *vht_cap);
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+                                    u16 vht_mask[NL80211_VHT_NSS_MAX]);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
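
The new helper declared above expands a VHT MCS map into per-NSS rate masks. In the VHT capability encoding, each spatial stream gets two bits: 0 means MCS 0-7, 1 means MCS 0-8, 2 means MCS 0-9, 3 means unsupported. A host-endian sketch of that expansion (the kernel version takes __le16):

    #include <stdint.h>

    #define VHT_NSS_MAX 8   /* mirrors NL80211_VHT_NSS_MAX */

    static void vht_mask_from_cap(uint16_t mcs_map,
                                  uint16_t mask[VHT_NSS_MAX])
    {
            for (int nss = 0; nss < VHT_NSS_MAX; nss++) {
                    switch ((mcs_map >> (2 * nss)) & 0x3) {
                    case 0:  mask[nss] = 0x00ff; break;  /* MCS 0-7 */
                    case 1:  mask[nss] = 0x01ff; break;  /* MCS 0-8 */
                    case 2:  mask[nss] = 0x03ff; break;  /* MCS 0-9 */
                    default: mask[nss] = 0x0000; break;  /* none    */
                    }
            }
    }
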
@@ -1763,8 +1766,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-                       enum nl80211_iftype type);
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
                             int rate, int erp, int short_preamble,
                             int shift);
@@ -2042,6 +2043,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
                                 enum ieee80211_chanctx_mode chanmode,
                                 u8 radar_detect);
 int ieee80211_max_num_channels(struct ieee80211_local *local);
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+                                      struct ieee80211_chanctx *ctx);
 
 /* TDLS */
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2058,8 +2062,8 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
                                          struct net_device *dev,
                                          const u8 *addr);
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-                                          struct sk_buff *skb);
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tdls_chsw_work(struct work_struct *wk);
 
 extern const struct ethtool_ops ieee80211_ethtool_ops;
 
index 553ac6dd4867480048aed3ca0d430948f928d3c7..6964fc6a8ea2c7b46149e8be5e79c6f373f50d40 100644 (file)
@@ -1242,8 +1242,6 @@ static void ieee80211_iface_work(struct work_struct *work)
                                                        WLAN_BACK_RECIPIENT, 0,
                                                        false);
                        mutex_unlock(&local->sta_mtx);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
-                       ieee80211_process_tdls_channel_switch(sdata, skb);
                } else if (ieee80211_is_action(mgmt->frame_control) &&
                           mgmt->u.action.category == WLAN_CATEGORY_BACK) {
                        int len = skb->len;
@@ -1790,13 +1788,23 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                sband = local->hw.wiphy->bands[i];
                sdata->rc_rateidx_mask[i] =
                        sband ? (1 << sband->n_bitrates) - 1 : 0;
-               if (sband)
+               if (sband) {
+                       __le16 cap;
+                       u16 *vht_rate_mask;
+
                        memcpy(sdata->rc_rateidx_mcs_mask[i],
                               sband->ht_cap.mcs.rx_mask,
                               sizeof(sdata->rc_rateidx_mcs_mask[i]));
-               else
+
+                       cap = sband->vht_cap.vht_mcs.rx_mcs_map;
+                       vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i];
+                       ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask);
+               } else {
                        memset(sdata->rc_rateidx_mcs_mask[i], 0,
                               sizeof(sdata->rc_rateidx_mcs_mask[i]));
+                       memset(sdata->rc_rateidx_vht_mcs_mask[i], 0,
+                              sizeof(sdata->rc_rateidx_vht_mcs_mask[i]));
+               }
        }
 
        ieee80211_set_default_queues(sdata);
index b22df3a79a417c9d182647f3126e004490d3de93..44388d6a1d8e628f4324f64e95c9de675e861830 100644 (file)
@@ -336,7 +336,6 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
                        ieee80211_check_fast_xmit(sta);
                } else {
                        rcu_assign_pointer(sta->gtk[idx], new);
-                       sta->gtk_idx = idx;
                }
        } else {
                defunikey = old &&
index 3f4f9eaac14003d1a2f2f655215fd0ed3fcca499..9951ef06323e743d2c33156d15e5a21478584cd9 100644 (file)
@@ -115,9 +115,6 @@ struct ieee80211_key {
                } gen;
        } u;
 
-       /* number of times this key has been used */
-       int tx_rx_count;
-
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct {
                struct dentry *stalink;
index 3c63468b4dfb530d0e28a5f97dddf5065543ba6f..ff79a13d231db0d4197c80a67a9d119d4c870e68 100644 (file)
@@ -629,6 +629,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        INIT_WORK(&local->sched_scan_stopped_work,
                  ieee80211_sched_scan_stopped_work);
 
+       INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
+
        spin_lock_init(&local->ack_status_lock);
        idr_init(&local->ack_status_frames);
 
@@ -645,6 +647,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 
        skb_queue_head_init(&local->skb_queue);
        skb_queue_head_init(&local->skb_queue_unreliable);
+       skb_queue_head_init(&local->skb_queue_tdls_chsw);
 
        ieee80211_alloc_led_names(local);
 
@@ -1132,18 +1135,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL(ieee80211_register_hw);
 
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-                       struct net_device *napi_dev,
-                       int (*poll)(struct napi_struct *, int),
-                       int weight)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
-       netif_napi_add(napi_dev, napi, poll, weight);
-       local->napi = napi;
-}
-EXPORT_SYMBOL_GPL(ieee80211_napi_add);
-
 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
@@ -1173,6 +1164,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 
        cancel_work_sync(&local->restart_work);
        cancel_work_sync(&local->reconfig_filter);
+       cancel_work_sync(&local->tdls_chsw_work);
        flush_work(&local->sched_scan_stopped_work);
 
        ieee80211_clear_tx_pending(local);
@@ -1183,6 +1175,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
                wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
+       skb_queue_purge(&local->skb_queue_tdls_chsw);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index 817098add1d6736e1632ba7f142a9b81465cd71f..e06a5ca7c9a996b311b524c37f93629ccc928a61 100644 (file)
@@ -158,7 +158,7 @@ void mesh_sta_cleanup(struct sta_info *sta)
        changed = mesh_accept_plinks_update(sdata);
        if (!sdata->u.mesh.user_mpm) {
                changed |= mesh_plink_deactivate(sta);
-               del_timer_sync(&sta->plink_timer);
+               del_timer_sync(&sta->mesh->plink_timer);
        }
 
        if (changed)
index 085edc1d056bf7adfe1f7fba5a500733bf72927a..d80e0a4c16cf98eb386bb61c3513f20619693556 100644 (file)
 
 #define MAX_PREQ_QUEUE_LEN     64
 
-/* Destination only */
-#define MP_F_DO        0x1
-/* Reply and forward */
-#define MP_F_RF        0x2
-/* Unknown Sequence Number */
-#define MP_F_USN    0x01
-/* Reason code Present */
-#define MP_F_RCODE  0x02
-
 static void mesh_queue_preq(struct mesh_path *, u8);
 
 static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
@@ -79,6 +70,12 @@ static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
 #define MSEC_TO_TU(x) (x*1000/1024)
 #define SN_GT(x, y) ((s32)(y - x) < 0)
 #define SN_LT(x, y) ((s32)(x - y) < 0)
+#define MAX_SANE_SN_DELTA 32
+
+static inline u32 SN_DELTA(u32 x, u32 y)
+{
+       return x >= y ? x - y : y - x;
+}
 
 #define net_traversal_jiffies(s) \
        msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
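
SN_GT/SN_LT do wraparound-safe serial-number comparison, while SN_DELTA together with MAX_SANE_SN_DELTA is the "peer rebooted" heuristic used later in hwmp_route_info_get(). A small standalone demo of both:

    #include <stdint.h>
    #include <stdio.h>

    #define SN_GT(x, y) ((int32_t)((y) - (x)) < 0)  /* wraparound-safe "x newer" */
    #define MAX_SANE_SN_DELTA 32

    static uint32_t sn_delta(uint32_t x, uint32_t y)
    {
            return x >= y ? x - y : y - x;
    }

    int main(void)
    {
            /* 2 is "newer" than 0xfffffffe despite the numeric wrap */
            printf("%d\n", SN_GT(2, 0xfffffffeu));                   /* 1 */
            /* a jump of more than 32 suggests the peer restarted */
            printf("%d\n", sn_delta(5, 40000) > MAX_SANE_SN_DELTA);  /* 1 */
            return 0;
    }
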
@@ -279,15 +276,10 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
        *pos++ = ttl;
        /* number of destinations */
        *pos++ = 1;
-       /*
-        * flags bit, bit 1 is unset if we know the sequence number and
-        * bit 2 is set if we have a reason code
+       /* Flags field has AE bit only as defined in
+        * sec 8.4.2.117 IEEE802.11-2012
         */
        *pos = 0;
-       if (!target_sn)
-               *pos |= MP_F_USN;
-       if (target_rcode)
-               *pos |= MP_F_RCODE;
        pos++;
        memcpy(pos, target, ETH_ALEN);
        pos += ETH_ALEN;
@@ -316,8 +308,9 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
        failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
        /* moving average, scaled to 100 */
-       sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
-       if (sta->fail_avg > 95)
+       sta->mesh->fail_avg =
+               ((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
+       if (sta->mesh->fail_avg > 95)
                mesh_plink_broken(sta);
 }
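
The update above is an integer EWMA scaled to 100: each step keeps 80% of the old average and adds 20 per failure, so fail_avg climbs toward 100 under consecutive failures and crosses the 95 threshold, triggering mesh_plink_broken(), after 17 straight failures from a clean slate. A quick check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fail_avg = 0, n = 0;

            /* same update as above, with failed == 1 every time */
            while (fail_avg <= 95) {
                    fail_avg = (80 * fail_avg + 5) / 100 + 20 * 1;
                    n++;
            }
            /* prints: broken after 17 consecutive failures (fail_avg=96) */
            printf("broken after %u consecutive failures (fail_avg=%u)\n",
                   n, fail_avg);
            return 0;
    }
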
 
@@ -333,7 +326,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
        u32 tx_time, estimated_retx;
        u64 result;
 
-       if (sta->fail_avg >= 100)
+       if (sta->mesh->fail_avg >= 100)
                return MAX_METRIC;
 
        sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
@@ -341,7 +334,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
        if (WARN_ON(!rate))
                return MAX_METRIC;
 
-       err = (sta->fail_avg << ARITH_SHIFT) / 100;
+       err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
 
        /* bitrate is in units of 100 Kbps, while we need rate in units of
         * 1Mbps. This will be corrected on tx_time computation.
@@ -441,6 +434,26 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
                                        process = false;
                                        fresh_info = false;
                                }
+                       } else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+                               bool have_sn, newer_sn, bounced;
+
+                               have_sn = mpath->flags & MESH_PATH_SN_VALID;
+                               newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
+                               bounced = have_sn &&
+                                         (SN_DELTA(orig_sn, mpath->sn) >
+                                                       MAX_SANE_SN_DELTA);
+
+                               if (!have_sn || newer_sn) {
+                                       /* if SN is newer than what we had
+                                        * then we can take it */;
+                               } else if (bounced) {
+                                       /* if SN is way different than what
+                                        * we had then assume the other side
+                                        * rebooted or restarted */;
+                               } else {
+                                       process = false;
+                                       fresh_info = false;
+                               }
                        }
                } else {
                        mpath = mesh_path_add(sdata, orig_addr);
@@ -570,15 +583,13 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                                        SN_LT(mpath->sn, target_sn)) {
                                mpath->sn = target_sn;
                                mpath->flags |= MESH_PATH_SN_VALID;
-                       } else if ((!(target_flags & MP_F_DO)) &&
+                       } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
                                        (mpath->flags & MESH_PATH_ACTIVE)) {
                                reply = true;
                                target_metric = mpath->metric;
                                target_sn = mpath->sn;
-                               if (target_flags & MP_F_RF)
-                                       target_flags |= MP_F_DO;
-                               else
-                                       forward = false;
+                               /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
+                               target_flags |= IEEE80211_PREQ_TO_FLAG;
                        }
                }
                rcu_read_unlock();
@@ -736,9 +747,12 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
                if (mpath->flags & MESH_PATH_ACTIVE &&
                    ether_addr_equal(ta, sta->sta.addr) &&
                    (!(mpath->flags & MESH_PATH_SN_VALID) ||
-                   SN_GT(target_sn, mpath->sn))) {
+                   SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
-                       mpath->sn = target_sn;
+                       if (target_sn != 0)
+                               mpath->sn = target_sn;
+                       else
+                               mpath->sn += 1;
                        spin_unlock_bh(&mpath->state_lock);
                        if (!ifmsh->mshcfg.dot11MeshForwarding)
                                goto endperr;
@@ -862,7 +876,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        sta = sta_info_get(sdata, mgmt->sa);
-       if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+       if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
                rcu_read_unlock();
                return;
        }
@@ -974,7 +988,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_preq_queue *preq_node;
        struct mesh_path *mpath;
-       u8 ttl, target_flags;
+       u8 ttl, target_flags = 0;
        const u8 *da;
        u32 lifetime;
 
@@ -1033,9 +1047,9 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
        }
 
        if (preq_node->flags & PREQ_Q_F_REFRESH)
-               target_flags = MP_F_DO;
+               target_flags |= IEEE80211_PREQ_TO_FLAG;
        else
-               target_flags = MP_F_RF;
+               target_flags &= ~IEEE80211_PREQ_TO_FLAG;
 
        spin_unlock_bh(&mpath->state_lock);
        da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
@@ -1176,7 +1190,9 @@ void mesh_path_timer(unsigned long data)
                spin_unlock_bh(&mpath->state_lock);
                mesh_queue_preq(mpath, 0);
        } else {
-               mpath->flags = 0;
+               mpath->flags &= ~(MESH_PATH_RESOLVING |
+                                 MESH_PATH_RESOLVED |
+                                 MESH_PATH_REQ_QUEUED);
                mpath->exp_time = jiffies;
                spin_unlock_bh(&mpath->state_lock);
                if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
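
Replacing the blanket mpath->flags = 0 with a selective clear means unrelated flag bits survive the timer path. A toy illustration (bit values assumed for the sketch):

    #include <assert.h>

    enum {
            MESH_PATH_RESOLVING  = 1 << 0,
            MESH_PATH_RESOLVED   = 1 << 1,
            MESH_PATH_REQ_QUEUED = 1 << 2,
            MESH_PATH_FIXED      = 1 << 3,
    };

    int main(void)
    {
            unsigned int flags = MESH_PATH_RESOLVING | MESH_PATH_FIXED;

            /* selective clear: discovery state goes away, FIXED survives */
            flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED |
                       MESH_PATH_REQ_QUEUED);
            assert(flags == MESH_PATH_FIXED);
            return 0;
    }
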
index 3b59099413fb1770e2ee2228065899bc5f9eb302..58384642e03c52bb6427fc809a9149769855e9c1 100644 (file)
 #include "rate.h"
 #include "mesh.h"
 
+#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
 #define PLINK_GET_LLID(p) (p + 2)
 #define PLINK_GET_PLID(p) (p + 4)
 
-#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
+#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
                                jiffies + msecs_to_jiffies(t)))
 
 enum plink_event {
@@ -53,18 +54,13 @@ static const char * const mplevents[] = {
        [CLS_IGNR] = "CLS_IGNR"
 };
 
-static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
-                              enum ieee80211_self_protected_actioncode action,
-                              u8 *da, u16 llid, u16 plid, u16 reason);
-
-
 /* We only need a valid sta if user configured a minimum rssi_threshold. */
 static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta)
 {
        s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
        return rssi_threshold == 0 ||
-              (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold);
+              (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
 }
 
 /**
@@ -72,14 +68,14 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->mesh->plink_lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
-       lockdep_assert_held(&sta->plink_lock);
-       sta->plink_state = NL80211_PLINK_LISTEN;
-       sta->llid = sta->plid = sta->reason = 0;
-       sta->plink_retries = 0;
+       lockdep_assert_held(&sta->mesh->plink_lock);
+       sta->mesh->plink_state = NL80211_PLINK_LISTEN;
+       sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
+       sta->mesh->plink_retries = 0;
 }
 
 /*
@@ -119,7 +115,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
                if (sdata != sta->sdata ||
-                   sta->plink_state != NL80211_PLINK_ESTAB)
+                   sta->mesh->plink_state != NL80211_PLINK_ESTAB)
                        continue;
 
                short_slot = false;
@@ -169,7 +165,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
                if (sdata != sta->sdata ||
-                   sta->plink_state != NL80211_PLINK_ESTAB)
+                   sta->mesh->plink_state != NL80211_PLINK_ESTAB)
                        continue;
 
                if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
@@ -204,59 +200,8 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        return BSS_CHANGED_HT;
 }
 
-/**
- * __mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- * Returns beacon changed flag if the beacon content changed.
- *
- * Locking: the caller must hold sta->plink_lock
- */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
-{
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u32 changed = 0;
-
-       lockdep_assert_held(&sta->plink_lock);
-
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
-               changed = mesh_plink_dec_estab_count(sdata);
-       sta->plink_state = NL80211_PLINK_BLOCKED;
-       mesh_path_flush_by_nexthop(sta);
-
-       ieee80211_mps_sta_status_update(sta);
-       changed |= ieee80211_mps_set_sta_local_pm(sta,
-                       NL80211_MESH_POWER_UNKNOWN);
-
-       return changed;
-}
-
-/**
- * mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- */
-u32 mesh_plink_deactivate(struct sta_info *sta)
-{
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u32 changed;
-
-       spin_lock_bh(&sta->plink_lock);
-       changed = __mesh_plink_deactivate(sta);
-       sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
-       mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
-                           sta->sta.addr, sta->llid, sta->plid,
-                           sta->reason);
-       spin_unlock_bh(&sta->plink_lock);
-
-       return changed;
-}
-
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+                              struct sta_info *sta,
                               enum ieee80211_self_protected_actioncode action,
                               u8 *da, u16 llid, u16 plid, u16 reason)
 {
@@ -306,7 +251,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
                        /* AID */
                        pos = skb_put(skb, 2);
-                       put_unaligned_le16(plid, pos);
+                       put_unaligned_le16(sta->sta.aid, pos);
                }
                if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
                    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -375,6 +320,58 @@ free:
        return err;
 }
 
+/**
+ * __mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
+ *
+ * Locking: the caller must hold sta->mesh->plink_lock
+ */
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed = 0;
+
+       lockdep_assert_held(&sta->mesh->plink_lock);
+
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+               changed = mesh_plink_dec_estab_count(sdata);
+       sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+       mesh_path_flush_by_nexthop(sta);
+
+       ieee80211_mps_sta_status_update(sta);
+       changed |= ieee80211_mps_set_sta_local_pm(sta,
+                       NL80211_MESH_POWER_UNKNOWN);
+
+       return changed;
+}
+
+/**
+ * mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ */
+u32 mesh_plink_deactivate(struct sta_info *sta)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       u32 changed;
+
+       spin_lock_bh(&sta->mesh->plink_lock);
+       changed = __mesh_plink_deactivate(sta);
+       sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
+       mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
+                           sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
+                           sta->mesh->reason);
+       spin_unlock_bh(&sta->mesh->plink_lock);
+
+       return changed;
+}
+
 static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
                               struct sta_info *sta,
                               struct ieee802_11_elems *elems, bool insert)
@@ -388,13 +385,14 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        sband = local->hw.wiphy->bands[band];
        rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
-       spin_lock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
        sta->last_rx = jiffies;
 
        /* rates and capabilities don't change during peering */
-       if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+           sta->mesh->processed_beacon)
                goto out;
-       sta->processed_beacon = true;
+       sta->mesh->processed_beacon = true;
 
        if (sta->sta.supp_rates[band] != rates)
                changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -421,23 +419,57 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        else
                rate_control_rate_update(local, sband, sta, changed);
 out:
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
+}
+
+static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+       unsigned long *aid_map;
+       int aid;
+
+       aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
+                         sizeof(*aid_map), GFP_KERNEL);
+       if (!aid_map)
+               return -ENOMEM;
+
+       /* reserve aid 0 for mcast indication */
+       __set_bit(0, aid_map);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
+               __set_bit(sta->sta.aid, aid_map);
+       rcu_read_unlock();
+
+       aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
+       kfree(aid_map);
+
+       if (aid > IEEE80211_MAX_AID)
+               return -ENOBUFS;
+
+       return aid;
 }
 
 static struct sta_info *
 __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
 {
        struct sta_info *sta;
+       int aid;
 
        if (sdata->local->num_sta >= MESH_MAX_PLINKS)
                return NULL;
 
+       aid = mesh_allocate_aid(sdata);
+       if (aid < 0)
+               return NULL;
+
        sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
        if (!sta)
                return NULL;
 
-       sta->plink_state = NL80211_PLINK_LISTEN;
+       sta->mesh->plink_state = NL80211_PLINK_LISTEN;
        sta->sta.wme = true;
+       sta->sta.aid = aid;
 
        sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
        sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
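
mesh_allocate_aid() above hands out the lowest unused AID via a temporary bitmap, reserving AID 0 for the multicast TIM indication. A userspace sketch of the same allocation (IEEE80211_MAX_AID assumed to be 2007):

    #include <limits.h>

    #define MAX_AID 2007   /* assumed, mirrors IEEE80211_MAX_AID */

    /* Return the smallest unused AID >= 1, or -1 if exhausted.
     * used[] lists AIDs already assigned to peers. */
    static int alloc_aid(const int *used, int n_used)
    {
            unsigned char map[(MAX_AID + 1 + CHAR_BIT - 1) / CHAR_BIT] = {0};

            map[0] |= 1;   /* AID 0 reserved for mcast indication */
            for (int i = 0; i < n_used; i++)
                    map[used[i] / CHAR_BIT] |= 1u << (used[i] % CHAR_BIT);

            for (int aid = 1; aid <= MAX_AID; aid++)
                    if (!(map[aid / CHAR_BIT] & (1u << (aid % CHAR_BIT))))
                            return aid;
            return -1;
    }
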
@@ -524,7 +556,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
                goto out;
 
        if (mesh_peer_accepts_plinks(elems) &&
-           sta->plink_state == NL80211_PLINK_LISTEN &&
+           sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
            sdata->u.mesh.accepting_plinks &&
            sdata->u.mesh.mshcfg.auto_open_plinks &&
            rssi_threshold_check(sdata, sta))
@@ -554,52 +586,52 @@ static void mesh_plink_timer(unsigned long data)
        if (sta->sdata->local->quiescing)
                return;
 
-       spin_lock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
 
        /* If a timer fires just before a state transition on another CPU,
         * we may have already extended the timeout and changed state by the
         * time we've acquired the lock and arrived here.  In that case,
         * skip this timer and wait for the new one.
         */
-       if (time_before(jiffies, sta->plink_timer.expires)) {
+       if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
                mpl_dbg(sta->sdata,
                        "Ignoring timer for %pM in state %s (timer adjusted)",
-                       sta->sta.addr, mplstates[sta->plink_state]);
-               spin_unlock_bh(&sta->plink_lock);
+                       sta->sta.addr, mplstates[sta->mesh->plink_state]);
+               spin_unlock_bh(&sta->mesh->plink_lock);
                return;
        }
 
        /* del_timer() and handler may race when entering these states */
-       if (sta->plink_state == NL80211_PLINK_LISTEN ||
-           sta->plink_state == NL80211_PLINK_ESTAB) {
+       if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
+           sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
                mpl_dbg(sta->sdata,
                        "Ignoring timer for %pM in state %s (timer deleted)",
-                       sta->sta.addr, mplstates[sta->plink_state]);
-               spin_unlock_bh(&sta->plink_lock);
+                       sta->sta.addr, mplstates[sta->mesh->plink_state]);
+               spin_unlock_bh(&sta->mesh->plink_lock);
                return;
        }
 
        mpl_dbg(sta->sdata,
                "Mesh plink timer for %pM fired on state %s\n",
-               sta->sta.addr, mplstates[sta->plink_state]);
+               sta->sta.addr, mplstates[sta->mesh->plink_state]);
        sdata = sta->sdata;
        mshcfg = &sdata->u.mesh.mshcfg;
 
-       switch (sta->plink_state) {
+       switch (sta->mesh->plink_state) {
        case NL80211_PLINK_OPN_RCVD:
        case NL80211_PLINK_OPN_SNT:
                /* retry timer */
-               if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
+               if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
                        u32 rand;
                        mpl_dbg(sta->sdata,
                                "Mesh plink for %pM (retry, timeout): %d %d\n",
-                               sta->sta.addr, sta->plink_retries,
-                               sta->plink_timeout);
+                               sta->sta.addr, sta->mesh->plink_retries,
+                               sta->mesh->plink_timeout);
                        get_random_bytes(&rand, sizeof(u32));
-                       sta->plink_timeout = sta->plink_timeout +
-                                            rand % sta->plink_timeout;
-                       ++sta->plink_retries;
-                       mod_plink_timer(sta, sta->plink_timeout);
+                       sta->mesh->plink_timeout = sta->mesh->plink_timeout +
+                                            rand % sta->mesh->plink_timeout;
+                       ++sta->mesh->plink_retries;
+                       mod_plink_timer(sta, sta->mesh->plink_timeout);
                        action = WLAN_SP_MESH_PEERING_OPEN;
                        break;
                }
@@ -609,31 +641,31 @@ static void mesh_plink_timer(unsigned long data)
                /* confirm timer */
                if (!reason)
                        reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
-               sta->plink_state = NL80211_PLINK_HOLDING;
+               sta->mesh->plink_state = NL80211_PLINK_HOLDING;
                mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
                action = WLAN_SP_MESH_PEERING_CLOSE;
                break;
        case NL80211_PLINK_HOLDING:
                /* holding timer */
-               del_timer(&sta->plink_timer);
+               del_timer(&sta->mesh->plink_timer);
                mesh_plink_fsm_restart(sta);
                break;
        default:
                break;
        }
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
        if (action)
-               mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-                                   sta->llid, sta->plid, reason);
+               mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+                                   sta->mesh->llid, sta->mesh->plid, reason);
 }
 
 static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
 {
-       sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
-       sta->plink_timer.data = (unsigned long) sta;
-       sta->plink_timer.function = mesh_plink_timer;
-       sta->plink_timeout = timeout;
-       add_timer(&sta->plink_timer);
+       sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
+       sta->mesh->plink_timer.data = (unsigned long) sta;
+       sta->mesh->plink_timer.function = mesh_plink_timer;
+       sta->mesh->plink_timeout = timeout;
+       add_timer(&sta->mesh->plink_timer);
 }
 
 static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
@@ -645,7 +677,7 @@ static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
-               if (!memcmp(&sta->llid, &llid, sizeof(llid))) {
+               if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
                        in_use = true;
                        break;
                }
@@ -661,8 +693,6 @@ static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata)
 
        do {
                get_random_bytes(&llid, sizeof(llid));
-               /* for mesh PS we still only have the AID range for TIM bits */
-               llid = (llid % IEEE80211_MAX_AID) + 1;
        } while (llid_in_use(sdata, llid));
 
        return llid;
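
With the AID-range clamp removed above, local link IDs are now drawn from the full 16-bit space, retrying on collision. A self-contained sketch against a toy station list:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    static const uint16_t peers[] = { 0x0001, 0x1234 };  /* toy station list */

    static bool llid_in_use(uint16_t llid)
    {
            for (unsigned i = 0; i < sizeof(peers) / sizeof(peers[0]); i++)
                    if (peers[i] == llid)
                            return true;
            return false;
    }

    static uint16_t get_new_llid(void)
    {
            uint16_t llid;

            do {
                    llid = (uint16_t)rand();   /* full 16-bit draw */
            } while (llid_in_use(llid));
            return llid;
    }
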
@@ -676,16 +706,16 @@ u32 mesh_plink_open(struct sta_info *sta)
        if (!test_sta_flag(sta, WLAN_STA_AUTH))
                return 0;
 
-       spin_lock_bh(&sta->plink_lock);
-       sta->llid = mesh_get_new_llid(sdata);
-       if (sta->plink_state != NL80211_PLINK_LISTEN &&
-           sta->plink_state != NL80211_PLINK_BLOCKED) {
-               spin_unlock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
+       sta->mesh->llid = mesh_get_new_llid(sdata);
+       if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
+           sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
+               spin_unlock_bh(&sta->mesh->plink_lock);
                return 0;
        }
-       sta->plink_state = NL80211_PLINK_OPN_SNT;
+       sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
        mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
        mpl_dbg(sdata,
                "Mesh plink: starting establishment with %pM\n",
                sta->sta.addr);
@@ -693,8 +723,8 @@ u32 mesh_plink_open(struct sta_info *sta)
        /* set the non-peer mode to active during peering */
        changed = ieee80211_mps_local_status_update(sdata);
 
-       mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
-                           sta->sta.addr, sta->llid, 0, 0);
+       mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
+                           sta->sta.addr, sta->mesh->llid, 0, 0);
        return changed;
 }
 
@@ -702,10 +732,10 @@ u32 mesh_plink_block(struct sta_info *sta)
 {
        u32 changed;
 
-       spin_lock_bh(&sta->plink_lock);
+       spin_lock_bh(&sta->mesh->plink_lock);
        changed = __mesh_plink_deactivate(sta);
-       sta->plink_state = NL80211_PLINK_BLOCKED;
-       spin_unlock_bh(&sta->plink_lock);
+       sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+       spin_unlock_bh(&sta->mesh->plink_lock);
 
        return changed;
 }
@@ -715,12 +745,11 @@ static void mesh_plink_close(struct ieee80211_sub_if_data *sdata,
                             enum plink_event event)
 {
        struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-
        u16 reason = (event == CLS_ACPT) ?
                     WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
 
-       sta->reason = reason;
-       sta->plink_state = NL80211_PLINK_HOLDING;
+       sta->mesh->reason = reason;
+       sta->mesh->plink_state = NL80211_PLINK_HOLDING;
        mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 }
 
@@ -730,8 +759,8 @@ static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
        struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
        u32 changed = 0;
 
-       del_timer(&sta->plink_timer);
-       sta->plink_state = NL80211_PLINK_ESTAB;
+       del_timer(&sta->mesh->plink_timer);
+       sta->mesh->plink_state = NL80211_PLINK_ESTAB;
        changed |= mesh_plink_inc_estab_count(sdata);
        changed |= mesh_set_ht_prot_mode(sdata);
        changed |= mesh_set_short_slot_time(sdata);
@@ -758,18 +787,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
        u32 changed = 0;
 
        mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
-               mplstates[sta->plink_state], mplevents[event]);
+               mplstates[sta->mesh->plink_state], mplevents[event]);
 
-       spin_lock_bh(&sta->plink_lock);
-       switch (sta->plink_state) {
+       spin_lock_bh(&sta->mesh->plink_lock);
+       switch (sta->mesh->plink_state) {
        case NL80211_PLINK_LISTEN:
                switch (event) {
                case CLS_ACPT:
                        mesh_plink_fsm_restart(sta);
                        break;
                case OPN_ACPT:
-                       sta->plink_state = NL80211_PLINK_OPN_RCVD;
-                       sta->llid = mesh_get_new_llid(sdata);
+                       sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
+                       sta->mesh->llid = mesh_get_new_llid(sdata);
                        mesh_plink_timer_set(sta,
                                             mshcfg->dot11MeshRetryTimeout);
 
@@ -791,11 +820,11 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
                        break;
                case OPN_ACPT:
                        /* retry timer is left untouched */
-                       sta->plink_state = NL80211_PLINK_OPN_RCVD;
+                       sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
                        action = WLAN_SP_MESH_PEERING_CONFIRM;
                        break;
                case CNF_ACPT:
-                       sta->plink_state = NL80211_PLINK_CNF_RCVD;
+                       sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
                        mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
                        break;
                default:
@@ -855,7 +884,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
        case NL80211_PLINK_HOLDING:
                switch (event) {
                case CLS_ACPT:
-                       del_timer(&sta->plink_timer);
+                       del_timer(&sta->mesh->plink_timer);
                        mesh_plink_fsm_restart(sta);
                        break;
                case OPN_ACPT:
@@ -874,17 +903,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
                 */
                break;
        }
-       spin_unlock_bh(&sta->plink_lock);
+       spin_unlock_bh(&sta->mesh->plink_lock);
        if (action) {
-               mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-                                   sta->llid, sta->plid, sta->reason);
+               mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+                                   sta->mesh->llid, sta->mesh->plid,
+                                   sta->mesh->reason);
 
                /* also send confirm in open case */
                if (action == WLAN_SP_MESH_PEERING_OPEN) {
-                       mesh_plink_frame_tx(sdata,
+                       mesh_plink_frame_tx(sdata, sta,
                                            WLAN_SP_MESH_PEERING_CONFIRM,
-                                           sta->sta.addr, sta->llid,
-                                           sta->plid, 0);
+                                           sta->sta.addr, sta->mesh->llid,
+                                           sta->mesh->plid, 0);
                }
        }
 
@@ -939,7 +969,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                        mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
                        goto out;
                }
-               if (sta->plink_state == NL80211_PLINK_BLOCKED)
+               if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
                        goto out;
        }
 
@@ -954,7 +984,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                if (!matches_local)
                        event = OPN_RJCT;
                if (!mesh_plink_free_count(sdata) ||
-                   (sta->plid && sta->plid != plid))
+                   (sta->mesh->plid && sta->mesh->plid != plid))
                        event = OPN_IGNR;
                else
                        event = OPN_ACPT;
@@ -963,14 +993,14 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                if (!matches_local)
                        event = CNF_RJCT;
                if (!mesh_plink_free_count(sdata) ||
-                   sta->llid != llid ||
-                   (sta->plid && sta->plid != plid))
+                   sta->mesh->llid != llid ||
+                   (sta->mesh->plid && sta->mesh->plid != plid))
                        event = CNF_IGNR;
                else
                        event = CNF_ACPT;
                break;
        case WLAN_SP_MESH_PEERING_CLOSE:
-               if (sta->plink_state == NL80211_PLINK_ESTAB)
+               if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                        /* Do not check for llid or plid. This does not
                         * follow the standard but since multiple plinks
                         * per sta are not supported, it is necessary in
@@ -981,9 +1011,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                         * restarted.
                         */
                        event = CLS_ACPT;
-               else if (sta->plid != plid)
+               else if (sta->mesh->plid != plid)
                        event = CLS_IGNR;
-               else if (ie_len == 8 && sta->llid != llid)
+               else if (ie_len == 8 && sta->mesh->llid != llid)
                        event = CLS_IGNR;
                else
                        event = CLS_ACPT;
@@ -1070,9 +1100,9 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
                        mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
                        goto unlock_rcu;
                }
-               sta->plid = plid;
+               sta->mesh->plid = plid;
        } else if (!sta && event == OPN_RJCT) {
-               mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+               mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
                                    mgmt->sa, 0, plid,
                                    WLAN_REASON_MESH_CONFIG);
                goto unlock_rcu;
@@ -1081,9 +1111,13 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
                goto unlock_rcu;
        }
 
-       /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
-       if (!sta->plid && event == CNF_ACPT)
-               sta->plid = plid;
+       if (event == CNF_ACPT) {
+               /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+               if (!sta->mesh->plid)
+                       sta->mesh->plid = plid;
+
+               sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
+       }
 
        changed |= mesh_plink_fsm(sdata, sta, event);
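Note: the hunks above move every peering field (plink_state, llid, plid, reason, plink_lock, plink_timer) from struct sta_info into the new sta->mesh substructure while keeping the existing discipline: mutate FSM state under plink_lock, record the pending action, and transmit only after the lock is dropped. A minimal user-space sketch of that discipline, with a pthread mutex standing in for the spinlock and all struct and function names illustrative:

#include <pthread.h>
#include <stdio.h>

enum plink_state { PLINK_LISTEN, PLINK_OPN_RCVD, PLINK_ESTAB };
enum plink_event { OPN_ACPT, CLS_ACPT };
enum plink_action { ACTION_NONE, ACTION_CONFIRM };

struct mesh_peer {
        pthread_mutex_t plink_lock;
        enum plink_state plink_state;
        unsigned short llid, plid;
};

static void plink_fsm(struct mesh_peer *p, enum plink_event event)
{
        enum plink_action action = ACTION_NONE;

        pthread_mutex_lock(&p->plink_lock);
        switch (p->plink_state) {
        case PLINK_LISTEN:
                if (event == OPN_ACPT) {
                        p->plink_state = PLINK_OPN_RCVD;
                        action = ACTION_CONFIRM;
                }
                break;
        default:
                break;
        }
        pthread_mutex_unlock(&p->plink_lock);

        /* frame TX happens outside the lock, as in the diff above */
        if (action == ACTION_CONFIRM)
                printf("send PEERING_CONFIRM llid=%hu plid=%hu\n",
                       p->llid, p->plid);
}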
 
index ad8b377b4b9f6cfa5d5e222a3c0aff169f4d72d1..90a268abea171aebc5d5907929db0804a2b0f7b0 100644 (file)
@@ -92,16 +92,16 @@ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
                if (sdata != sta->sdata)
                        continue;
 
-               switch (sta->plink_state) {
+               switch (sta->mesh->plink_state) {
                case NL80211_PLINK_OPN_SNT:
                case NL80211_PLINK_OPN_RCVD:
                case NL80211_PLINK_CNF_RCVD:
                        peering = true;
                        break;
                case NL80211_PLINK_ESTAB:
-                       if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+                       if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
                                light_sleep_cnt++;
-                       else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+                       else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
                                deep_sleep_cnt++;
                        break;
                default:
@@ -153,19 +153,19 @@ u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-       if (sta->local_pm == pm)
+       if (sta->mesh->local_pm == pm)
                return 0;
 
        mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
                pm, sta->sta.addr);
 
-       sta->local_pm = pm;
+       sta->mesh->local_pm = pm;
 
        /*
         * announce peer-specific power mode transition
         * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
         */
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                mps_qos_null_tx(sta);
 
        return ieee80211_mps_local_status_update(sdata);
@@ -197,8 +197,8 @@ void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
 
        if (is_unicast_ether_addr(hdr->addr1) &&
            ieee80211_is_data_qos(hdr->frame_control) &&
-           sta->plink_state == NL80211_PLINK_ESTAB)
-               pm = sta->local_pm;
+           sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+               pm = sta->mesh->local_pm;
        else
                pm = sdata->u.mesh.nonpeer_pm;
 
@@ -241,16 +241,16 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
         * use peer-specific power mode if peering is established and the
         * peer's power mode is known
         */
-       if (sta->plink_state == NL80211_PLINK_ESTAB &&
-           sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
-               pm = sta->peer_pm;
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+           sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+               pm = sta->mesh->peer_pm;
        else
-               pm = sta->nonpeer_pm;
+               pm = sta->mesh->nonpeer_pm;
 
        do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
 
        /* clear the MPSP flags for non-peers or active STA */
-       if (sta->plink_state != NL80211_PLINK_ESTAB) {
+       if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
                clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
                clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
        } else if (!do_buffer) {
@@ -296,13 +296,13 @@ static void mps_set_sta_peer_pm(struct sta_info *sta,
                pm = NL80211_MESH_POWER_ACTIVE;
        }
 
-       if (sta->peer_pm == pm)
+       if (sta->mesh->peer_pm == pm)
                return;
 
        mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
                sta->sta.addr, pm);
 
-       sta->peer_pm = pm;
+       sta->mesh->peer_pm = pm;
 
        ieee80211_mps_sta_status_update(sta);
 }
@@ -317,13 +317,13 @@ static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
        else
                pm = NL80211_MESH_POWER_ACTIVE;
 
-       if (sta->nonpeer_pm == pm)
+       if (sta->mesh->nonpeer_pm == pm)
                return;
 
        mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
                sta->sta.addr, pm);
 
-       sta->nonpeer_pm = pm;
+       sta->mesh->nonpeer_pm = pm;
 
        ieee80211_mps_sta_status_update(sta);
 }
@@ -552,7 +552,7 @@ void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
        } else {
                if (eosp)
                        clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
-               else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+               else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
                        set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
 
                if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
@@ -577,9 +577,9 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
        int ac, buffer_local = 0;
        bool has_buffered = false;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
-                                                  sta->llid);
+                                                  sta->mesh->aid);
 
        if (has_buffered)
                mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
@@ -598,7 +598,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
        if (!has_buffered && !buffer_local)
                return;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB)
+       if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
                mpsp_trigger_send(sta, has_buffered, !buffer_local);
        else
                mps_frame_deliver(sta, 1);
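Note: across these power-save hunks the mode selection is always the same: once the plink is established and the peer's mode is known, the peer-specific mode wins; otherwise the non-peer mode applies, and anything other than ACTIVE means frames for that peer must be buffered. A standalone model of that selection (enum and field names illustrative):

#include <stdbool.h>

enum pm_mode { PM_UNKNOWN, PM_ACTIVE, PM_LIGHT_SLEEP, PM_DEEP_SLEEP };
enum plink_state { PLINK_LISTEN, PLINK_ESTAB };

struct mesh_peer {
        enum plink_state plink_state;
        enum pm_mode peer_pm;     /* peer's mode towards us */
        enum pm_mode nonpeer_pm;  /* mode towards non-peer neighbors */
};

static bool must_buffer(const struct mesh_peer *p)
{
        enum pm_mode pm;

        if (p->plink_state == PLINK_ESTAB && p->peer_pm != PM_UNKNOWN)
                pm = p->peer_pm;
        else
                pm = p->nonpeer_pm;

        /* any sleep mode means frames for this peer must be buffered */
        return pm != PM_ACTIVE;
}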
index 09625d6205c31418edba53a62ee027245f232050..64bc22ad94965c4615eaaa94aae02b45891204e7 100644 (file)
@@ -127,14 +127,14 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 
        /* Timing offset calculation (see 13.13.2.2.2) */
        t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
-       sta->t_offset = t_t - t_r;
+       sta->mesh->t_offset = t_t - t_r;
 
        if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-               s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+               s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
                msync_dbg(sdata,
-                         "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
-                         sta->sta.addr, (long long) sta->t_offset,
-                         (long long) sta->t_offset_setpoint,
+                         "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
+                         sta->sta.addr, (long long) sta->mesh->t_offset,
+                         (long long) sta->mesh->t_offset_setpoint,
                          (long long) t_clockdrift);
 
                if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
@@ -152,12 +152,12 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                        ifmsh->sync_offset_clockdrift_max = t_clockdrift;
                spin_unlock_bh(&ifmsh->sync_offset_lock);
        } else {
-               sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+               sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
                set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
                msync_dbg(sdata,
-                         "STA %pM : offset was invalid, sta->t_offset=%lld\n",
+                         "STA %pM : offset was invalid, t_offset=%lld\n",
                          sta->sta.addr,
-                         (long long) sta->t_offset);
+                         (long long) sta->mesh->t_offset);
        }
 
 no_sync:
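Note: the sync logic above computes t_offset = t_t - t_r per beacon; the first valid sample fixes a setpoint TOFFSET_SET_MARGIN below it, and later samples report drift as setpoint - t_offset. A small numeric walk-through under an assumed margin of 20 (the real constant is defined outside this diff):

#include <stdio.h>

#define TOFFSET_SET_MARGIN 20  /* assumed value, for illustration only */

int main(void)
{
        long long t_offset, setpoint = 0;
        int known = 0;
        long long samples[] = { 1000, 1003, 998 }; /* t_t - t_r per beacon */

        for (int i = 0; i < 3; i++) {
                t_offset = samples[i];
                if (known) {
                        long long drift = setpoint - t_offset;
                        printf("t_offset=%lld drift=%lld\n", t_offset, drift);
                } else {
                        /* first valid offset fixes the reference point */
                        setpoint = t_offset - TOFFSET_SET_MARGIN;
                        known = 1;
                        printf("setpoint fixed at %lld\n", setpoint);
                }
        }
        return 0;
}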
index 9b2cc278ac2afc60920ebec3083bebc35c497b61..705ef1d040edfb70042fdd9cd25f050b19dab4c0 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -538,11 +539,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+/* This function determines the VHT capability flags for the association
+ * and builds the IE.
+ * Note: the function may set the owner of the MU-MIMO capability.
+ */
 static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb,
                                 struct ieee80211_supported_band *sband,
                                 struct ieee80211_vht_cap *ap_vht_cap)
 {
+       struct ieee80211_local *local = sdata->local;
        u8 *pos;
        u32 cap;
        struct ieee80211_sta_vht_cap vht_cap;
@@ -576,7 +582,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
         */
        if (!(ap_vht_cap->vht_cap_info &
                        cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
-               cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+               cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                        IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+       else if (!(ap_vht_cap->vht_cap_info &
+                       cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+               cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+       /*
+        * If some other vif is using the MU-MIMO capability we cannot associate
+        * using MU-MIMO - this will lead to contradictions in the group-id
+        * mechanism.
+        * Ownership is claimed at association request time, in order to
+        * avoid simultaneous MU-MIMO associations.
+        */
+       if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
+               bool disable_mu_mimo = false;
+               struct ieee80211_sub_if_data *other;
+
+               list_for_each_entry_rcu(other, &local->interfaces, list) {
+                       if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+                               disable_mu_mimo = true;
+                               break;
+                       }
+               }
+               if (disable_mu_mimo)
+                       cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+               else
+                       sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+       }
 
        mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
 
@@ -1096,24 +1129,6 @@ static void ieee80211_chswitch_timer(unsigned long data)
        ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
-static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
-{
-       struct sta_info *sta;
-       u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
-               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
-                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-                       continue;
-
-               ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
-                                           NL80211_TDLS_TEARDOWN, reason,
-                                           GFP_ATOMIC);
-       }
-       rcu_read_unlock();
-}
-
 static void
 ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                 u64 timestamp, u32 device_timestamp,
@@ -2076,6 +2091,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
        memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
        memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+       sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
@@ -2538,6 +2554,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
                eth_zero_addr(sdata->u.mgd.bssid);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
                sdata->u.mgd.flags = 0;
+               sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
                mutex_lock(&sdata->local->mtx);
                ieee80211_vif_release_channel(sdata);
                mutex_unlock(&sdata->local->mtx);
@@ -3034,12 +3051,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 
        rate_control_rate_init(sta);
 
-       if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
+       if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
                set_sta_flag(sta, WLAN_STA_MFP);
-               sta->sta.mfp = true;
-       } else {
-               sta->sta.mfp = false;
-       }
 
        sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
 
index 358d5f9d820788acef2f439dee6ac88250724edc..573b81a1fb2d882487f75ccc24493cc1b9deee5e 100644 (file)
@@ -179,7 +179,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
-       u32 changed = BSS_CHANGED_OCB;
+       u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
        int err;
 
        if (ifocb->joined == true)
index fda33f961d83ce44f05ab3298113877d01768fb2..9857693b91ec721ff71e3f3cd1087ccc289912e1 100644 (file)
@@ -29,6 +29,65 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
 MODULE_PARM_DESC(ieee80211_default_rc_algo,
                 "Default rate control algorithm for mac80211 to use");
 
+void rate_control_rate_init(struct sta_info *sta)
+{
+       struct ieee80211_local *local = sta->sdata->local;
+       struct rate_control_ref *ref = sta->rate_ctrl;
+       struct ieee80211_sta *ista = &sta->sta;
+       void *priv_sta = sta->rate_ctrl_priv;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       ieee80211_sta_set_rx_nss(sta);
+
+       if (!ref)
+               return;
+
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+       if (WARN_ON(!chanctx_conf)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+
+       spin_lock_bh(&sta->rate_ctrl_lock);
+       ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+                           priv_sta);
+       spin_unlock_bh(&sta->rate_ctrl_lock);
+       rcu_read_unlock();
+       set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
+}
+
+void rate_control_rate_update(struct ieee80211_local *local,
+                                   struct ieee80211_supported_band *sband,
+                                   struct sta_info *sta, u32 changed)
+{
+       struct rate_control_ref *ref = local->rate_ctrl;
+       struct ieee80211_sta *ista = &sta->sta;
+       void *priv_sta = sta->rate_ctrl_priv;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       if (ref && ref->ops->rate_update) {
+               rcu_read_lock();
+
+               chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+               if (WARN_ON(!chanctx_conf)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               spin_lock_bh(&sta->rate_ctrl_lock);
+               ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+                                     ista, priv_sta, changed);
+               spin_unlock_bh(&sta->rate_ctrl_lock);
+               rcu_read_unlock();
+       }
+       drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+}
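Note: both functions promoted out of the header above share one locking shape: an RCU read section to dereference the channel context, with the per-STA rate_ctrl spinlock taken only around the ops callback. A standalone model of that ordering, where a pthread rwlock stands in for RCU and a mutex for the spinlock (everything here is illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rcu = PTHREAD_RWLOCK_INITIALIZER;  /* ~ RCU */
static pthread_mutex_t rate_ctrl_lock = PTHREAD_MUTEX_INITIALIZER;

struct chanctx { int band; };
static struct chanctx *chanctx_ptr;  /* ~ sdata->vif.chanctx_conf */

static void rate_update(unsigned int changed)
{
        pthread_rwlock_rdlock(&rcu);         /* ~ rcu_read_lock() */
        struct chanctx *c = chanctx_ptr;     /* ~ rcu_dereference() */
        if (!c) {                            /* the WARN_ON bail-out path */
                pthread_rwlock_unlock(&rcu);
                return;
        }
        pthread_mutex_lock(&rate_ctrl_lock); /* serialize the ops callback */
        printf("ops->rate_update, band=%d changed=0x%x\n", c->band, changed);
        pthread_mutex_unlock(&rate_ctrl_lock);
        pthread_rwlock_unlock(&rcu);         /* ~ rcu_read_unlock() */
}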
+
 int ieee80211_rate_control_register(const struct rate_control_ops *ops)
 {
        struct rate_control_alg *alg;
@@ -294,39 +353,37 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta,
 }
 EXPORT_SYMBOL(rate_control_send_low);
 
-static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
-                                      int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask)
 {
        int j;
 
        /* See whether the selected rate or anything below it is allowed. */
-       for (j = rate->idx; j >= 0; j--) {
+       for (j = *rate_idx; j >= 0; j--) {
                if (mask & (1 << j)) {
                        /* Okay, found a suitable rate. Use it. */
-                       rate->idx = j;
+                       *rate_idx = j;
                        return true;
                }
        }
 
        /* Try to find a higher rate that would be allowed */
-       for (j = rate->idx + 1; j < n_bitrates; j++) {
+       for (j = *rate_idx + 1; j < n_bitrates; j++) {
                if (mask & (1 << j)) {
                        /* Okay, found a suitable rate. Use it. */
-                       rate->idx = j;
+                       *rate_idx = j;
                        return true;
                }
        }
        return false;
 }
 
-static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
-                                   u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask)
 {
        int i, j;
        int ridx, rbit;
 
-       ridx = rate->idx / 8;
-       rbit = rate->idx % 8;
+       ridx = *rate_idx / 8;
+       rbit = *rate_idx % 8;
 
        /* sanity check */
        if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
@@ -336,20 +393,20 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
        for (i = ridx; i >= 0; i--) {
                for (j = rbit; j >= 0; j--)
                        if (mcs_mask[i] & BIT(j)) {
-                               rate->idx = i * 8 + j;
+                               *rate_idx = i * 8 + j;
                                return true;
                        }
                rbit = 7;
        }
 
        /* Try to find a higher rate that would be allowed */
-       ridx = (rate->idx + 1) / 8;
-       rbit = (rate->idx + 1) % 8;
+       ridx = (*rate_idx + 1) / 8;
+       rbit = (*rate_idx + 1) % 8;
 
        for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
                for (j = rbit; j < 8; j++)
                        if (mcs_mask[i] & BIT(j)) {
-                               rate->idx = i * 8 + j;
+                               *rate_idx = i * 8 + j;
                                return true;
                        }
                rbit = 0;
@@ -357,37 +414,93 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
        return false;
 }
 
+static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask)
+{
+       int i, j;
+       int ridx, rbit;
+
+       ridx = *rate_idx >> 4;
+       rbit = *rate_idx & 0xf;
+
+       if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX)
+               return false;
+
+       /* See whether the selected rate or anything below it is allowed. */
+       for (i = ridx; i >= 0; i--) {
+               for (j = rbit; j >= 0; j--) {
+                       if (vht_mask[i] & BIT(j)) {
+                               *rate_idx = (i << 4) | j;
+                               return true;
+                       }
+               }
+               rbit = 15;
+       }
 
+       /* Try to find a higher rate that would be allowed */
+       ridx = (*rate_idx + 1) >> 4;
+       rbit = (*rate_idx + 1) & 0xf;
 
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+       for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) {
+               for (j = rbit; j < 16; j++) {
+                       if (vht_mask[i] & BIT(j)) {
+                               *rate_idx = (i << 4) | j;
+                               return true;
+                       }
+               }
+               rbit = 0;
+       }
+       return false;
+}
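Note: the new VHT matcher packs a rate index as (NSS << 4) | MCS and scans the per-NSS mask downward from the selected rate, then upward. A compilable standalone version of the same search, with VHT_NSS_MAX assumed to be 8 to mirror NL80211_VHT_NSS_MAX:

#include <stdbool.h>

#define VHT_NSS_MAX 8

static bool vht_match(signed char *rate_idx, const unsigned short *mask)
{
        int ridx = *rate_idx >> 4;   /* spatial-stream index */
        int rbit = *rate_idx & 0xf;  /* MCS within that NSS */

        if (ridx < 0 || ridx >= VHT_NSS_MAX)
                return false;

        /* the selected rate or anything below it */
        for (int i = ridx; i >= 0; i--, rbit = 15)
                for (int j = rbit; j >= 0; j--)
                        if (mask[i] & (1u << j)) {
                                *rate_idx = (signed char)((i << 4) | j);
                                return true;
                        }

        /* otherwise the next higher allowed rate */
        ridx = (*rate_idx + 1) >> 4;
        rbit = (*rate_idx + 1) & 0xf;
        for (int i = ridx; i < VHT_NSS_MAX; i++, rbit = 0)
                for (int j = rbit; j < 16; j++)
                        if (mask[i] & (1u << j)) {
                                *rate_idx = (signed char)((i << 4) | j);
                                return true;
                        }
        return false;
}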
+
+static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags,
                                struct ieee80211_supported_band *sband,
                                enum nl80211_chan_width chan_width,
                                u32 mask,
-                               u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+                               u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+                               u16 vht_mask[NL80211_VHT_NSS_MAX])
 {
-       struct ieee80211_tx_rate alt_rate;
+       if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+               /* handle VHT rates */
+               if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask))
+                       return;
+
+               *rate_idx = 0;
+               /* keep protection flags */
+               *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+                               IEEE80211_TX_RC_USE_CTS_PROTECT |
+                               IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
 
-       /* handle HT rates */
-       if (rate->flags & IEEE80211_TX_RC_MCS) {
-               if (rate_idx_match_mcs_mask(rate, mcs_mask))
+               *rate_flags |= IEEE80211_TX_RC_MCS;
+               if (chan_width == NL80211_CHAN_WIDTH_40)
+                       *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+               if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
                        return;
 
                /* also try the legacy rates. */
-               alt_rate.idx = 0;
+               *rate_flags &= ~(IEEE80211_TX_RC_MCS |
+                                IEEE80211_TX_RC_40_MHZ_WIDTH);
+               if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+                                              mask))
+                       return;
+       } else if (*rate_flags & IEEE80211_TX_RC_MCS) {
+               /* handle HT rates */
+               if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
+                       return;
+
+               /* also try the legacy rates. */
+               *rate_idx = 0;
                /* keep protection flags */
-               alt_rate.flags = rate->flags &
-                                (IEEE80211_TX_RC_USE_RTS_CTS |
-                                 IEEE80211_TX_RC_USE_CTS_PROTECT |
-                                 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-               alt_rate.count = rate->count;
-               if (rate_idx_match_legacy_mask(&alt_rate,
-                                              sband->n_bitrates, mask)) {
-                       *rate = alt_rate;
+               *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+                               IEEE80211_TX_RC_USE_CTS_PROTECT |
+                               IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+               if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+                                              mask))
                        return;
-               }
-       } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
+       } else {
                /* handle legacy rates */
-               if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
+               if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+                                              mask))
                        return;
 
                /* if HT BSS, and we handle a data frame, also try HT rates */
@@ -400,23 +513,19 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
                        break;
                }
 
-               alt_rate.idx = 0;
+               *rate_idx = 0;
                /* keep protection flags */
-               alt_rate.flags = rate->flags &
-                                (IEEE80211_TX_RC_USE_RTS_CTS |
-                                 IEEE80211_TX_RC_USE_CTS_PROTECT |
-                                 IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-               alt_rate.count = rate->count;
+               *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+                               IEEE80211_TX_RC_USE_CTS_PROTECT |
+                               IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
 
-               alt_rate.flags |= IEEE80211_TX_RC_MCS;
+               *rate_flags |= IEEE80211_TX_RC_MCS;
 
                if (chan_width == NL80211_CHAN_WIDTH_40)
-                       alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+                       *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 
-               if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
-                       *rate = alt_rate;
+               if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
                        return;
-               }
        }
 
        /*
@@ -569,18 +678,92 @@ static void rate_control_fill_sta_table(struct ieee80211_sta *sta,
        }
 }
 
+static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_supported_band *sband,
+                                 struct ieee80211_sta *sta, u32 *mask,
+                                 u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+                                 u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+       u32 i, flags;
+
+       *mask = sdata->rc_rateidx_mask[sband->band];
+       flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((flags & sband->bitrates[i].flags) != flags)
+                       *mask &= ~BIT(i);
+       }
+
+       if (*mask == (1 << sband->n_bitrates) - 1 &&
+           !sdata->rc_has_mcs_mask[sband->band] &&
+           !sdata->rc_has_vht_mcs_mask[sband->band])
+               return false;
+
+       if (sdata->rc_has_mcs_mask[sband->band])
+               memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band],
+                      IEEE80211_HT_MCS_MASK_LEN);
+       else
+               memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN);
+
+       if (sdata->rc_has_vht_mcs_mask[sband->band])
+               memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band],
+                      sizeof(u16) * NL80211_VHT_NSS_MAX);
+       else
+               memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+       if (sta) {
+               __le16 sta_vht_cap;
+               u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
+
+               /* Filter out rates that the STA does not support */
+               *mask &= sta->supp_rates[sband->band];
+               for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+                       mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+
+               sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+               ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
+               for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+                       vht_mask[i] &= sta_vht_mask[i];
+       }
+
+       return true;
+}
+
+static void
+rate_control_apply_mask_ratetbl(struct sta_info *sta,
+                               struct ieee80211_supported_band *sband,
+                               struct ieee80211_sta_rates *rates)
+{
+       int i;
+       u32 mask;
+       u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
+       u16 vht_mask[NL80211_VHT_NSS_MAX];
+       enum nl80211_chan_width chan_width;
+
+       if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask,
+                                  mcs_mask, vht_mask))
+               return;
+
+       chan_width = sta->sdata->vif.bss_conf.chandef.width;
+       for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
+               if (rates->rate[i].idx < 0)
+                       break;
+
+               rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags,
+                                   sband, chan_width, mask, mcs_mask,
+                                   vht_mask);
+       }
+}
+
 static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_sta *sta,
                                    struct ieee80211_supported_band *sband,
-                                   struct ieee80211_tx_info *info,
                                    struct ieee80211_tx_rate *rates,
                                    int max_rates)
 {
        enum nl80211_chan_width chan_width;
        u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
-       bool has_mcs_mask;
        u32 mask;
-       u32 rate_flags;
+       u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX];
        int i;
 
        /*
@@ -588,30 +771,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
         * default mask (allow all rates) is used to save some processing for
         * the common case.
         */
-       mask = sdata->rc_rateidx_mask[info->band];
-       has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
-       rate_flags =
-               ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
-       for (i = 0; i < sband->n_bitrates; i++)
-               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
-                       mask &= ~BIT(i);
-
-       if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
+       if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask,
+                                  vht_mask))
                return;
 
-       if (has_mcs_mask)
-               memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
-                      sizeof(mcs_mask));
-       else
-               memset(mcs_mask, 0xff, sizeof(mcs_mask));
-
-       if (sta) {
-               /* Filter out rates that the STA does not support */
-               mask &= sta->supp_rates[info->band];
-               for (i = 0; i < sizeof(mcs_mask); i++)
-                       mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
-       }
-
        /*
         * Make sure the rate index selected for each TX rate is
         * included in the configured mask and change the rate indexes
@@ -623,8 +786,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
                if (rates[i].idx < 0)
                        break;
 
-               rate_idx_match_mask(&rates[i], sband, chan_width, mask,
-                                   mcs_mask);
+               rate_flags = rates[i].flags;
+               rate_idx_match_mask(&rates[i].idx, &rate_flags, sband,
+                                   chan_width, mask, mcs_mask, vht_mask);
+               rates[i].flags = rate_flags;
        }
 }
 
@@ -648,7 +813,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
        sband = sdata->local->hw.wiphy->bands[info->band];
 
        if (ieee80211_is_data(hdr->frame_control))
-               rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
+               rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
 
        if (dest[0].idx < 0)
                __rate_control_send_low(&sdata->local->hw, sband, sta, info,
@@ -705,7 +870,10 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
 {
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        struct ieee80211_sta_rates *old;
+       struct ieee80211_supported_band *sband;
 
+       sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+       rate_control_apply_mask_ratetbl(sta, sband, rates);
        /*
         * mac80211 guarantees that this function will not be called
         * concurrently, so the following RCU access is safe, even without
index 25c9be5dd7fd811b32d13c792cf50c661f6f1e44..624fe5b81615e6afa04138cfdb375c8894d133c0 100644 (file)
@@ -71,64 +71,10 @@ rate_control_tx_status_noskb(struct ieee80211_local *local,
        spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
-static inline void rate_control_rate_init(struct sta_info *sta)
-{
-       struct ieee80211_local *local = sta->sdata->local;
-       struct rate_control_ref *ref = sta->rate_ctrl;
-       struct ieee80211_sta *ista = &sta->sta;
-       void *priv_sta = sta->rate_ctrl_priv;
-       struct ieee80211_supported_band *sband;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       ieee80211_sta_set_rx_nss(sta);
-
-       if (!ref)
-               return;
-
-       rcu_read_lock();
-
-       chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-       if (WARN_ON(!chanctx_conf)) {
-               rcu_read_unlock();
-               return;
-       }
-
-       sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-
-       spin_lock_bh(&sta->rate_ctrl_lock);
-       ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
-                           priv_sta);
-       spin_unlock_bh(&sta->rate_ctrl_lock);
-       rcu_read_unlock();
-       set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
-}
-
-static inline void rate_control_rate_update(struct ieee80211_local *local,
+void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_update(struct ieee80211_local *local,
                                    struct ieee80211_supported_band *sband,
-                                   struct sta_info *sta, u32 changed)
-{
-       struct rate_control_ref *ref = local->rate_ctrl;
-       struct ieee80211_sta *ista = &sta->sta;
-       void *priv_sta = sta->rate_ctrl_priv;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       if (ref && ref->ops->rate_update) {
-               rcu_read_lock();
-
-               chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-               if (WARN_ON(!chanctx_conf)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               spin_lock_bh(&sta->rate_ctrl_lock);
-               ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
-                                     ista, priv_sta, changed);
-               spin_unlock_bh(&sta->rate_ctrl_lock);
-               rcu_read_unlock();
-       }
-       drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
-}
+                                   struct sta_info *sta, u32 changed);
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
                                           struct sta_info *sta, gfp_t gfp)
index 247552a7f6c2f23a1e4bc89b647d8d37680bf2c3..3ece7d1034c81ae8749cada074fbebecbe06d57f 100644 (file)
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-       int j = MAX_THR_RATES;
-       struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+       int j;
+       struct minstrel_rate_stats *tmp_mrs;
        struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-       while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-              minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-               j--;
+       for (j = MAX_THR_RATES; j > 0; --j) {
                tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+               if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+                   minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+                       break;
        }
 
        if (j < MAX_THR_RATES - 1)
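Note: the rewritten loop fixes the comparand: the old while-loop paired the rate taken from tp_list[j - 1] with stats fetched for a different (or stale) entry, while the new for-loop fetches the matching stats before each comparison. Modeled standalone with plain integers for the per-rate throughput estimates, including the unchanged insertion tail shown in context:

#include <string.h>

#define MAX_THR_RATES 4

/* tp[] holds a throughput estimate per rate index; tp_list[] is the
 * current top list, best first. Insert rate i at its sorted position. */
static void sort_best_tp(const int *tp, unsigned char *tp_list,
                         unsigned char i)
{
        int j;

        /* scan from the bottom; stop where the existing entry still wins */
        for (j = MAX_THR_RATES; j > 0; --j)
                if (tp[i] <= tp[tp_list[j - 1]])
                        break;

        if (j < MAX_THR_RATES - 1)
                memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - j - 1);
        if (j < MAX_THR_RATES)
                tp_list[j] = i;
}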
index 543b672335353817334b407fbfc1ff3574625ee6..3928dbd24e257e68627aa977cc54a19aaa996339 100644 (file)
@@ -867,7 +867,13 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        else
                idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
 
-       if (offset > 0) {
+       /* enable RTS/CTS if needed:
+        *  - if station is in dynamic SMPS (and streams > 1)
+        *  - for fallback rates, to increase chances of getting through
+        */
+       if (offset > 0 ||
+           (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+            group->streams > 1)) {
                ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
                flags |= IEEE80211_TX_RC_USE_RTS_CTS;
        }
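Note: the rule above reads: fallback rates (offset > 0) get RTS/CTS to improve their chance of getting through, and dynamic-SMPS peers with more than one spatial stream get it on every rate, since the RTS exchange is what wakes their additional RX chains. A small decision helper mirroring it (names illustrative):

#include <stdbool.h>

enum smps { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

static bool want_rts_cts(int offset, enum smps mode, int streams)
{
        return offset > 0 ||              /* fallback rate */
               (mode == SMPS_DYNAMIC &&   /* dynamic SMPS needs RTS to */
                streams > 1);             /* wake the extra RX chains  */
}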
index 5dae166cb7f56b7cb9d9097496db9e8fa8d755ce..5bc0b88d9eb1331a0dd47aab05ffa01c7a0bc845 100644 (file)
@@ -42,6 +42,51 @@ static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
        u64_stats_update_end(&tstats->syncp);
 }
 
+static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+                              enum nl80211_iftype type)
+{
+       __le16 fc = hdr->frame_control;
+
+       if (ieee80211_is_data(fc)) {
+               if (len < 24) /* drop incorrect hdr len (data) */
+                       return NULL;
+
+               if (ieee80211_has_a4(fc))
+                       return NULL;
+               if (ieee80211_has_tods(fc))
+                       return hdr->addr1;
+               if (ieee80211_has_fromds(fc))
+                       return hdr->addr2;
+
+               return hdr->addr3;
+       }
+
+       if (ieee80211_is_mgmt(fc)) {
+               if (len < 24) /* drop incorrect hdr len (mgmt) */
+                       return NULL;
+               return hdr->addr3;
+       }
+
+       if (ieee80211_is_ctl(fc)) {
+               if (ieee80211_is_pspoll(fc))
+                       return hdr->addr1;
+
+               if (ieee80211_is_back_req(fc)) {
+                       switch (type) {
+                       case NL80211_IFTYPE_STATION:
+                               return hdr->addr2;
+                       case NL80211_IFTYPE_AP:
+                       case NL80211_IFTYPE_AP_VLAN:
+                               return hdr->addr1;
+                       default:
+                               break; /* fall through to the return */
+                       }
+               }
+       }
+
+       return NULL;
+}
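Note: for the data-frame branch above, the ToDS/FromDS bits in the frame control field pick the address slot that carries the BSSID (addr1, addr2, or addr3; four-address frames carry none). A standalone mapping using the standard bit values 0x0100/0x0200, with a simplified header struct (the real frame control field is little-endian on the wire):

#include <stdint.h>
#include <stddef.h>

struct hdr80211 { uint16_t fc; uint8_t a1[6], a2[6], a3[6]; };

#define FC_TODS   0x0100
#define FC_FROMDS 0x0200

static const uint8_t *data_bssid(const struct hdr80211 *h)
{
        switch (h->fc & (FC_TODS | FC_FROMDS)) {
        case FC_TODS:   return h->a1; /* STA -> AP: BSSID = addr1 */
        case FC_FROMDS: return h->a2; /* AP -> STA: BSSID = addr2 */
        case 0:         return h->a3; /* IBSS/DLS:  BSSID = addr3 */
        default:        return NULL;  /* 4-address frame: no BSSID */
        }
}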
+
 /*
  * monitor mode reception
  *
@@ -77,8 +122,7 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
        hdr = (void *)(skb->data + rtap_vendor_space);
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-                           RX_FLAG_FAILED_PLCP_CRC |
-                           RX_FLAG_AMPDU_IS_ZEROLEN))
+                           RX_FLAG_FAILED_PLCP_CRC))
                return true;
 
        if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -346,10 +390,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                        cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
                put_unaligned_le32(status->ampdu_reference, pos);
                pos += 4;
-               if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
-                       flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
-               if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
-                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
                if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
                        flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
                if (status->flag & RX_FLAG_AMPDU_IS_LAST)
@@ -1093,11 +1133,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 
-       if (unlikely(rx->skb->len < 16)) {
-               I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
-               return RX_DROP_MONITOR;
-       }
-
        /* Drop disallowed frame classes based on STA auth/assoc state;
         * IEEE 802.11, Chap 5.5.
         *
@@ -1240,22 +1275,22 @@ static void sta_ps_end(struct sta_info *sta)
        ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
-int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
 {
-       struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+       struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
        bool in_ps;
 
-       WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
+       WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
 
        /* Don't let the same PS state be set twice */
-       in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
+       in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
        if ((start && in_ps) || (!start && !in_ps))
                return -EINVAL;
 
        if (start)
-               sta_ps_start(sta_inf);
+               sta_ps_start(sta);
        else
-               sta_ps_end(sta_inf);
+               sta_ps_end(sta);
 
        return 0;
 }
@@ -1393,7 +1428,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
        sta->rx_bytes += rx->skb->len;
        if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
                sta->last_signal = status->signal;
-               ewma_add(&sta->avg_signal, -status->signal);
+               ewma_signal_add(&sta->avg_signal, -status->signal);
        }
 
        if (status->chains) {
@@ -1405,7 +1440,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                                continue;
 
                        sta->chain_signal_last[i] = signal;
-                       ewma_add(&sta->chain_signal_avg[i], -signal);
+                       ewma_signal_add(&sta->chain_signal_avg[i], -signal);
                }
        }
 
@@ -1647,7 +1682,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
                        return RX_DROP_MONITOR;
 
-               rx->key->tx_rx_count++;
                /* TODO: add threshold stuff again */
        } else {
                return RX_DROP_MONITOR;
@@ -1883,7 +1917,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 
        /* Complete frame has been reassembled - process it now */
        status = IEEE80211_SKB_RXCB(rx->skb);
-       status->rx_flags |= IEEE80211_RX_FRAGMENTED;
 
  out:
        ieee80211_led_rx(rx->local);
@@ -2108,9 +2141,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
-                   rx->local->napi)
-                       napi_gro_receive(rx->local->napi, skb);
+               if (rx->napi)
+                       napi_gro_receive(rx->napi, skb);
                else
                        netif_receive_skb(skb);
        }
@@ -2378,9 +2410,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
                    tf->category == WLAN_CATEGORY_TDLS &&
                    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
                     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
-                       rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
-                       skb_queue_tail(&sdata->skb_queue, rx->skb);
-                       ieee80211_queue_work(&rx->local->hw, &sdata->work);
+                       skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
+                       schedule_work(&local->tdls_chsw_work);
                        if (rx->sta)
                                rx->sta->rx_packets++;
 
@@ -3004,7 +3035,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
        return RX_QUEUED;
 }
 
-/* TODO: use IEEE80211_RX_FRAGMENTED */
 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
                                        struct ieee80211_rate *rate)
 {
@@ -3216,7 +3246,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .flags = IEEE80211_RX_REORDER_TIMER,
+               .napi = NULL, /* must be NULL to not have races */
        };
        struct tid_ampdu_rx *tid_agg_rx;
 
@@ -3286,7 +3316,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
        case NL80211_IFTYPE_OCB:
                if (!bssid)
                        return false;
-               if (ieee80211_is_beacon(hdr->frame_control))
+               if (!ieee80211_is_data_present(hdr->frame_control))
                        return false;
                if (!is_broadcast_ether_addr(bssid))
                        return false;
@@ -3393,7 +3423,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
-                                        struct sk_buff *skb)
+                                        struct sk_buff *skb,
+                                        struct napi_struct *napi)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_sub_if_data *sdata;
@@ -3409,6 +3440,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
        memset(&rx, 0, sizeof(rx));
        rx.skb = skb;
        rx.local = local;
+       rx.napi = napi;
 
        if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
                I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
@@ -3510,7 +3542,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      struct napi_struct *napi)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_rate *rate = NULL;
@@ -3609,7 +3642,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
        ieee80211_tpt_led_trig_rx(local,
                        ((struct ieee80211_hdr *)skb->data)->frame_control,
                        skb->len);
-       __ieee80211_rx_handle_packet(hw, skb);
+       __ieee80211_rx_handle_packet(hw, skb, napi);
 
        rcu_read_unlock();
 
@@ -3617,7 +3650,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
  drop:
        kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx_napi);
 
 /* This is a version of the rx handler that can be called from hard irq
  * context. Post the skb on the queue and schedule the tasklet */
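Note: the NAPI plumbing above threads the driver's napi context from the renamed entry point ieee80211_rx_napi() down to delivery, replacing the old IEEE80211_RX_REORDER_TIMER flag: GRO when a context is present, plain receive when it is NULL (as on the reorder-timeout path, where it must stay NULL to avoid races). A toy model of the delivery choice, with stand-ins for the net-stack calls:

#include <stdio.h>

struct napi { int id; };
struct skb { int len; };

static void napi_gro_rx(struct napi *n, struct skb *s)
{ printf("GRO path (napi %d), len=%d\n", n->id, s->len); }

static void netif_rx(struct skb *s)
{ printf("plain path, len=%d\n", s->len); }

static void deliver(struct napi *napi, struct skb *skb)
{
        if (napi)
                napi_gro_rx(napi, skb); /* driver used the NAPI entry point */
        else
                netif_rx(skb);          /* no context: legacy/timeout path */
}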
index 666ddac3c87c67a63ed685efe83221cf7d52428d..64f1936350c66e48fb076beb7b418d55f42c8753 100644 (file)
@@ -68,7 +68,7 @@ static const struct rhashtable_params sta_rht_params = {
        .nelem_hint = 3, /* start small */
        .automatic_shrinking = true,
        .head_offset = offsetof(struct sta_info, hash_node),
-       .key_offset = offsetof(struct sta_info, sta.addr),
+       .key_offset = offsetof(struct sta_info, addr),
        .key_len = ETH_ALEN,
        .hashfn = sta_addr_hash,
        .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
@@ -249,6 +249,9 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
        if (sta->sta.txq[0])
                kfree(to_txq_info(sta->sta.txq[0]));
        kfree(rcu_dereference_raw(sta->sta.rates));
+#ifdef CONFIG_MAC80211_MESH
+       kfree(sta->mesh);
+#endif
        kfree(sta);
 }
 
@@ -313,13 +316,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
-       spin_lock_init(&sta->plink_lock);
-       if (ieee80211_vif_is_mesh(&sdata->vif) &&
-           !sdata->u.mesh.user_mpm)
-               init_timer(&sta->plink_timer);
-       sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+               if (!sta->mesh)
+                       goto free;
+               spin_lock_init(&sta->mesh->plink_lock);
+               if (ieee80211_vif_is_mesh(&sdata->vif) &&
+                   !sdata->u.mesh.user_mpm)
+                       init_timer(&sta->mesh->plink_timer);
+               sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+       }
 #endif
 
+       memcpy(sta->addr, addr, ETH_ALEN);
        memcpy(sta->sta.addr, addr, ETH_ALEN);
        sta->local = local;
        sta->sdata = sdata;
@@ -332,9 +341,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        ktime_get_ts(&uptime);
        sta->last_connected = uptime.tv_sec;
-       ewma_init(&sta->avg_signal, 1024, 8);
+       ewma_signal_init(&sta->avg_signal);
        for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
-               ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+               ewma_signal_init(&sta->chain_signal_avg[i]);
 
        if (local->ops->wake_tx_queue) {
                void *txq_data;
@@ -405,6 +414,9 @@ free_txq:
        if (sta->sta.txq[0])
                kfree(to_txq_info(sta->sta.txq[0]));
 free:
+#ifdef CONFIG_MAC80211_MESH
+       kfree(sta->mesh);
+#endif
        kfree(sta);
        return NULL;
 }
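Note: the allocation hunks follow the usual conditional-substructure pattern: sta->mesh is allocated only for mesh vifs, stays NULL otherwise, and is freed on both the error-unwind path and in sta_info_free(). Sketched with calloc/free in place of kzalloc/kfree (free(NULL), like kfree(NULL), is a no-op, which keeps the unwind unconditional):

#include <stdlib.h>
#include <stdbool.h>

struct mesh_part { unsigned short llid, plid; };
struct sta { struct mesh_part *mesh; /* NULL on non-mesh interfaces */ };

static struct sta *sta_alloc(bool is_mesh)
{
        struct sta *sta = calloc(1, sizeof(*sta));
        if (!sta)
                return NULL;

        if (is_mesh) {
                sta->mesh = calloc(1, sizeof(*sta->mesh));
                if (!sta->mesh)
                        goto fail;
        }
        return sta;

fail:
        free(sta->mesh); /* safe even when NULL */
        free(sta);
        return NULL;
}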
@@ -623,7 +635,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
        bool indicate_tim = false;
        u8 ignore_for_tim = sta->sta.uapsd_queues;
        int ac;
-       u16 id;
+       u16 id = sta->sta.aid;
 
        if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
            sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -631,12 +643,9 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
                        return;
 
                ps = &sta->sdata->bss->ps;
-               id = sta->sta.aid;
 #ifdef CONFIG_MAC80211_MESH
        } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
                ps = &sta->sdata->u.mesh.ps;
-               /* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */
-               id = sta->plid % (IEEE80211_MAX_AID + 1);
 #endif
        } else {
                return;
@@ -1887,7 +1896,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                }
 
                if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
-                       sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+                       sinfo->signal_avg =
+                               (s8) -ewma_signal_read(&sta->avg_signal);
                        sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
                }
        }
@@ -1902,7 +1912,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
                        sinfo->chain_signal[i] = sta->chain_signal_last[i];
                        sinfo->chain_signal_avg[i] =
-                               (s8) -ewma_read(&sta->chain_signal_avg[i]);
+                               (s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
                }
        }
 
@@ -1956,16 +1966,16 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                                 BIT(NL80211_STA_INFO_PEER_PM) |
                                 BIT(NL80211_STA_INFO_NONPEER_PM);
 
-               sinfo->llid = sta->llid;
-               sinfo->plid = sta->plid;
-               sinfo->plink_state = sta->plink_state;
+               sinfo->llid = sta->mesh->llid;
+               sinfo->plid = sta->mesh->plid;
+               sinfo->plink_state = sta->mesh->plink_state;
                if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
                        sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
-                       sinfo->t_offset = sta->t_offset;
+                       sinfo->t_offset = sta->mesh->t_offset;
                }
-               sinfo->local_pm = sta->local_pm;
-               sinfo->peer_pm = sta->peer_pm;
-               sinfo->nonpeer_pm = sta->nonpeer_pm;
+               sinfo->local_pm = sta->mesh->local_pm;
+               sinfo->peer_pm = sta->mesh->peer_pm;
+               sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
 #endif
        }
 
index 226f8ca47ad6737ff54c6dc3a8bc4036189e9fbb..b087c71ff7fe4ef99ed3869cc2c7eecc174b95c0 100644 (file)
@@ -53,6 +53,8 @@
  * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
  * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
  *     TDLS peer
+ * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider
+ *     bandwidth on the BSS base channel.
  * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
  *     keeping station in power-save mode, reply when the driver
  *     unblocks the station.
@@ -84,6 +86,7 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_TDLS_INITIATOR,
        WLAN_STA_TDLS_CHAN_SWITCH,
        WLAN_STA_TDLS_OFF_CHANNEL,
+       WLAN_STA_TDLS_WIDER_BW,
        WLAN_STA_UAPSD,
        WLAN_STA_SP,
        WLAN_STA_4ADDR_EVENT,
@@ -269,6 +272,56 @@ struct ieee80211_fast_tx {
        struct rcu_head rcu_head;
 };
 
+/**
+ * struct mesh_sta - mesh STA information
+ * @plink_lock: serialize access to plink fields
+ * @llid: Local link ID
+ * @plid: Peer link ID
+ * @aid: local aid supplied by peer
+ * @reason: Cancel reason on PLINK_HOLDING state
+ * @plink_retries: Retries in establishment
+ * @plink_state: peer link state
+ * @plink_timeout: timeout of peer link
+ * @plink_timer: peer link watch timer
+ * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ *     calculating clockdrift
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @processed_beacon: set to true after peer rates and capabilities are
+ *     processed
+ * @fail_avg: moving percentage of failed MSDUs
+ */
+struct mesh_sta {
+       struct timer_list plink_timer;
+
+       s64 t_offset;
+       s64 t_offset_setpoint;
+
+       spinlock_t plink_lock;
+       u16 llid;
+       u16 plid;
+       u16 aid;
+       u16 reason;
+       u8 plink_retries;
+
+       bool processed_beacon;
+
+       enum nl80211_plink_state plink_state;
+       u32 plink_timeout;
+
+       /* mesh power save */
+       enum nl80211_mesh_power_mode local_pm;
+       enum nl80211_mesh_power_mode peer_pm;
+       enum nl80211_mesh_power_mode nonpeer_pm;
+
+       /* moving percentage of failed MSDUs */
+       unsigned int fail_avg;
+};
+
+DECLARE_EWMA(signal, 1024, 8)
+
 /**
  * struct sta_info - STA information
  *
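
The DECLARE_EWMA(signal, 1024, 8) line replaces the old runtime-parameterized struct ewma with a compile-time generated ewma_signal type: the average is stored scaled by a factor of 1024 for fixed-point precision, and each new sample carries a weight of 1/8. A minimal standalone sketch of the same fixed-point averaging idea follows; the helper names match the generated ones used above, but the arithmetic is a simplified stand-in for the kernel macro, not its actual implementation.

    #include <stdio.h>

    /* Fixed-point EWMA sketch along the lines of
     * DECLARE_EWMA(signal, 1024, 8): average scaled by 1024, each new
     * sample weighted 1/8. Simplified stand-in for the kernel macro.
     */
    struct ewma_signal {
            unsigned long internal;         /* average, scaled by FACTOR */
    };

    #define EWMA_FACTOR     1024UL
    #define EWMA_WEIGHT     8UL

    static void ewma_signal_add(struct ewma_signal *e, unsigned long val)
    {
            unsigned long scaled = val * EWMA_FACTOR;

            /* new = (old * (weight - 1) + sample) / weight, scaled units */
            e->internal = e->internal ?
                    (e->internal * (EWMA_WEIGHT - 1) + scaled) / EWMA_WEIGHT :
                    scaled;
    }

    static unsigned long ewma_signal_read(struct ewma_signal *e)
    {
            return e->internal / EWMA_FACTOR;       /* back to sample units */
    }

    int main(void)
    {
            struct ewma_signal avg = { 0 };

            ewma_signal_add(&avg, 40);      /* mac80211 stores -signal, so */
            ewma_signal_add(&avg, 48);      /* 40 stands in for -40 dBm    */
            printf("avg = %lu\n", ewma_signal_read(&avg));  /* prints 41 */
            return 0;
    }

Because the stored value is the negated signal, sta_set_sinfo() above negates the read result back into a signed dBm figure.
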
@@ -278,12 +331,13 @@ struct ieee80211_fast_tx {
  * @list: global linked list entry
  * @free_list: list entry for keeping track of stations to free
  * @hash_node: hash node for rhashtable
+ * @addr: station's MAC address - duplicated from public part to
+ *     let the hash table work with just a single cacheline
  * @local: pointer to the global information
  * @sdata: virtual interface this station belongs to
  * @ptk: peer keys negotiated with this station, if any
  * @ptk_idx: last installed peer key index
  * @gtk: group keys negotiated with this station, if any
- * @gtk_idx: last installed group key index
  * @rate_ctrl: rate control algorithm reference
  * @rate_ctrl_lock: spinlock used to protect rate control data
  *     (data inside the algorithm, so serializes calls there)
@@ -318,30 +372,17 @@ struct ieee80211_fast_tx {
  * @last_signal: signal of last received frame from this STA
  * @avg_signal: moving average of signal of received frames from this STA
  * @last_ack_signal: signal of last received Ack frame from this STA
- * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
+ * @last_seq_ctrl: last received seq/frag number from this STA (per TID
+ *     plus one for non-QoS frames)
  * @tx_filtered_count: number of frames the hardware filtered for this STA
  * @tx_retry_failed: number of frames that failed retry
  * @tx_retry_count: total number of retries for frames to this STA
- * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
- * @plink_lock: serialize access to plink fields
- * @llid: Local link ID
- * @plid: Peer link ID
- * @reason: Cancel reason on PLINK_HOLDING state
- * @plink_retries: Retries in establishment
- * @plink_state: peer link state
- * @plink_timeout: timeout of peer link
- * @plink_timer: peer link watch timer
- * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- *     calculating clockdrift
- * @local_pm: local link-specific power save mode
- * @peer_pm: peer-specific power save mode towards local STA
- * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @mesh: mesh STA information
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -369,19 +410,19 @@ struct ieee80211_fast_tx {
  * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
  *     entry for non-QoS frames
  * @fast_tx: TX fastpath information
- * @processed_beacon: set to true after peer rates and capabilities are
- *     processed
+ * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible
+ *     with the BSS one.
  */
 struct sta_info {
        /* General information, mostly static */
        struct list_head list, free_list;
        struct rcu_head rcu_head;
        struct rhash_head hash_node;
+       u8 addr[ETH_ALEN];
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
        struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
-       u8 gtk_idx;
        u8 ptk_idx;
        struct rate_control_ref *rate_ctrl;
        void *rate_ctrl_priv;
@@ -390,6 +431,10 @@ struct sta_info {
 
        struct ieee80211_fast_tx __rcu *fast_tx;
 
+#ifdef CONFIG_MAC80211_MESH
+       struct mesh_sta *mesh;
+#endif
+
        struct work_struct drv_deliver_wk;
 
        u16 listen_interval;
@@ -419,12 +464,12 @@ struct sta_info {
        unsigned long rx_fragments;
        unsigned long rx_dropped;
        int last_signal;
-       struct ewma avg_signal;
+       struct ewma_signal avg_signal;
        int last_ack_signal;
 
        u8 chains;
        s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-       struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+       struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
 
        /* Plus 1 for non-QoS frames */
        __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -432,8 +477,6 @@ struct sta_info {
        /* Updated from TX status path only, no locking requirements */
        unsigned long tx_filtered_count;
        unsigned long tx_retry_failed, tx_retry_count;
-       /* moving percentage of failed MSDUs */
-       unsigned int fail_avg;
 
        /* Updated from TX path only, no locking requirements */
        u64 tx_packets[IEEE80211_NUM_ACS];
@@ -455,29 +498,6 @@ struct sta_info {
        struct sta_ampdu_mlme ampdu_mlme;
        u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
-#ifdef CONFIG_MAC80211_MESH
-       /*
-        * Mesh peer link attributes, protected by plink_lock.
-        * TODO: move to a sub-structure that is referenced with pointer?
-        */
-       spinlock_t plink_lock;
-       u16 llid;
-       u16 plid;
-       u16 reason;
-       u8 plink_retries;
-       enum nl80211_plink_state plink_state;
-       u32 plink_timeout;
-       struct timer_list plink_timer;
-
-       s64 t_offset;
-       s64 t_offset_setpoint;
-       /* mesh power save */
-       enum nl80211_mesh_power_mode local_pm;
-       enum nl80211_mesh_power_mode peer_pm;
-       enum nl80211_mesh_power_mode nonpeer_pm;
-       bool processed_beacon;
-#endif
-
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct sta_info_debugfsdentries {
                struct dentry *dir;
@@ -498,6 +518,8 @@ struct sta_info {
 
        u8 reserved_tid;
 
+       struct cfg80211_chan_def tdls_chandef;
+
        /* keep last! */
        struct ieee80211_sta sta;
 };
@@ -505,7 +527,7 @@ struct sta_info {
 static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
 {
 #ifdef CONFIG_MAC80211_MESH
-       return sta->plink_state;
+       return sta->mesh->plink_state;
 #endif
        return NL80211_PLINK_LISTEN;
 }
@@ -608,7 +630,7 @@ u32 sta_addr_hash(const void *key, u32 length, u32 seed);
                               _sta_bucket_idx(tbl, _addr),             \
                               hash_node)                               \
        /* compare address and run code only if it matches */           \
-       if (ether_addr_equal(_sta->sta.addr, (_addr)))
+       if (ether_addr_equal(_sta->addr, (_addr)))
 
 /*
  * Get STA info by index, BROKEN!
index 45628f37c083aa72575fcc0a0241b1aefc0cb5ad..8ba5832435095f10e94f782e07d92a4f742cad3a 100644 (file)
@@ -515,7 +515,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 
                if (!sdata) {
                        skb->dev = NULL;
-               } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+               } else {
                        unsigned int hdr_size =
                                ieee80211_hdrlen(hdr->frame_control);
 
@@ -529,9 +529,6 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
                                ieee80211_mgd_conn_tx_status(sdata,
                                                             hdr->frame_control,
                                                             acked);
-               } else {
-                       /* we assign ack frame ID for the others */
-                       WARN_ON(1);
                }
 
                rcu_read_unlock();
index 8db6e2994bbc59bb7cf38c36848a7d92b8e4a0e5..aee701a5649e59ebd03ef300f25e33eadfc280d5 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
+ * Copyright 2015  Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -11,6 +12,7 @@
 #include <linux/ieee80211.h>
 #include <linux/log2.h>
 #include <net/cfg80211.h>
+#include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
@@ -35,20 +37,28 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk)
        mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
                                         struct sk_buff *skb)
 {
-       u8 *pos = (void *)skb_put(skb, 7);
+       struct ieee80211_local *local = sdata->local;
        bool chan_switch = local->hw.wiphy->features &
                           NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+       bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+       struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+       bool vht = sband && sband->vht_cap.vht_supported;
+       u8 *pos = (void *)skb_put(skb, 10);
 
        *pos++ = WLAN_EID_EXT_CAPABILITY;
-       *pos++ = 5; /* len */
+       *pos++ = 8; /* len */
        *pos++ = 0x0;
        *pos++ = 0x0;
        *pos++ = 0x0;
        *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
        *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+       *pos++ = 0;
+       *pos++ = 0;
+       *pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
 }
 
 static u8
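
The extended-capabilities element grows from five to eight capability octets (7 to 10 bytes including the two-byte IE header) so that octet 8 can advertise wide-bandwidth TDLS. A hedged sketch of the resulting layout; the bit values are stand-ins for the kernel's WLAN_EXT_CAPA* definitions:

    #include <stdint.h>
    #include <string.h>

    #define EID_EXT_CAPABILITY              127     /* WLAN_EID_EXT_CAPABILITY */
    #define CAPA4_TDLS_CHAN_SWITCH          0x40    /* octet 4, assumed bit */
    #define CAPA5_TDLS_ENABLED              0x20    /* octet 5, assumed bit */
    #define CAPA8_TDLS_WIDE_BW_ENABLED      0x20    /* octet 8, assumed bit */

    static size_t build_ext_capab(uint8_t *buf, int chan_switch, int wider_bw)
    {
            uint8_t *pos = buf;

            *pos++ = EID_EXT_CAPABILITY;
            *pos++ = 8;                     /* len: 8 capability octets */
            memset(pos, 0, 8);
            pos[3] = chan_switch ? CAPA4_TDLS_CHAN_SWITCH : 0;
            pos[4] = CAPA5_TDLS_ENABLED;
            pos[7] = wider_bw ? CAPA8_TDLS_WIDE_BW_ENABLED : 0;

            return 2 + 8;                   /* matches skb_put(skb, 10) */
    }

    int main(void)
    {
            uint8_t ie[10];

            return build_ext_capab(ie, 1, 1) == sizeof(ie) ? 0 : 1;
    }
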
@@ -283,6 +293,60 @@ static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata,
        }
 }
 
+static void
+ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
+                                  struct sta_info *sta)
+{
+       /* IEEE802.11ac-2013 Table E-4 */
+       u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+       struct cfg80211_chan_def uc = sta->tdls_chandef;
+       enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+       int i;
+
+       /* only support upgrading non-narrow channels up to 80MHz */
+       if (max_width == NL80211_CHAN_WIDTH_5 ||
+           max_width == NL80211_CHAN_WIDTH_10)
+               return;
+
+       if (max_width > NL80211_CHAN_WIDTH_80)
+               max_width = NL80211_CHAN_WIDTH_80;
+
+       if (uc.width == max_width)
+               return;
+       /*
+        * Channel usage constraints in the IEEE802.11ac-2013 specification only
+        * allow expanding a 20MHz channel to 80MHz in a single way. In
+        * addition, there are no 40MHz allowed channels that are not part of
+        * the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
+        */
+       for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
+               if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
+                       uc.center_freq1 = centers_80mhz[i];
+                       uc.width = NL80211_CHAN_WIDTH_80;
+                       break;
+               }
+
+       if (!uc.center_freq1)
+               return;
+
+       /* proceed to downgrade the chandef until usable or the same */
+       while (uc.width > max_width &&
+              !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+                                       &uc, sdata->wdev.iftype))
+               ieee80211_chandef_downgrade(&uc);
+
+       if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
+               tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
+                        sta->tdls_chandef.width, uc.width);
+
+               /*
+                * the station is not yet authorized when BW upgrade is done,
+                * locking is not required
+                */
+               sta->tdls_chandef = uc;
+       }
+}
+
 static void
 ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                                   struct sk_buff *skb, const u8 *peer,
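
The |center_freq - c| <= 30 test above works because an 80MHz segment spans its center c +/- 40 MHz, and the outermost 20MHz primary channel inside it has its own center exactly 30 MHz from c. A standalone arithmetic sketch of the check; the channel-to-frequency values are the standard 5 GHz ones, while the helper name is invented:

    #include <stdio.h>
    #include <stdlib.h>

    /* IEEE 802.11ac-2013 Table E-4, 5 GHz 80 MHz segment centers (MHz) */
    static const int centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };

    /* Return the 80 MHz center containing a 20 MHz primary, or 0 if none.
     * A primary at center f belongs to a segment at center c iff
     * |f - c| <= 30: the segment covers c +/- 40 MHz and the outermost
     * 20 MHz subchannel center sits 30 MHz from c.
     */
    static int find_center_80mhz(int primary_center)
    {
            size_t i;

            for (i = 0; i < sizeof(centers_80mhz) / sizeof(centers_80mhz[0]); i++)
                    if (abs(primary_center - centers_80mhz[i]) <= 30)
                            return centers_80mhz[i];
            return 0;
    }

    int main(void)
    {
            /* channel 36 -> 5180 MHz, inside the 5170-5250 segment at 5210 */
            printf("ch36 -> %d\n", find_center_80mhz(5180));        /* 5210 */
            /* channel 165 -> 5825 MHz, not in any listed 80 MHz segment */
            printf("ch165 -> %d\n", find_center_80mhz(5825));       /* 0 */
            return 0;
    }
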
@@ -320,7 +384,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                offset = noffset;
        }
 
-       ieee80211_tdls_add_ext_capab(local, skb);
+       ieee80211_tdls_add_ext_capab(sdata, skb);
 
        /* add the QoS element if we support it */
        if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -350,15 +414,17 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                offset = noffset;
        }
 
-       rcu_read_lock();
+       mutex_lock(&local->sta_mtx);
 
        /* we should have the peer STA if we're already responding */
        if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
                sta = sta_info_get(sdata, peer);
                if (WARN_ON_ONCE(!sta)) {
-                       rcu_read_unlock();
+                       mutex_unlock(&local->sta_mtx);
                        return;
                }
+
+               sta->tdls_chandef = sdata->vif.bss_conf.chandef;
        }
 
        ieee80211_tdls_add_oper_classes(sdata, skb);
@@ -384,10 +450,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
                ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
        } else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
                   ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
-               /* disable SMPS in TDLS responder */
-               sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
-                                       << IEEE80211_HT_CAP_SM_PS_SHIFT;
-
                /* the peer caps are already intersected with our own */
                memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
 
@@ -448,9 +510,16 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
 
                pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
                ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+
+               /*
+                * if both peers support WIDER_BW, we can expand the chandef to
+                * a wider compatible one, up to 80MHz
+                */
+               if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+                       ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
        }
 
-       rcu_read_unlock();
+       mutex_unlock(&local->sta_mtx);
 
        /* add any remaining IEs */
        if (extra_ies_len) {
@@ -474,15 +543,17 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
        u8 *pos;
 
-       rcu_read_lock();
+       mutex_lock(&local->sta_mtx);
 
        sta = sta_info_get(sdata, peer);
        ap_sta = sta_info_get(sdata, ifmgd->bssid);
        if (WARN_ON_ONCE(!sta || !ap_sta)) {
-               rcu_read_unlock();
+               mutex_unlock(&local->sta_mtx);
                return;
        }
 
+       sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+
        /* add any custom IEs that go before the QoS IE */
        if (extra_ies_len) {
                static const u8 before_qos[] = {
@@ -530,12 +601,19 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
 
        /* only include VHT-operation if not on the 2.4GHz band */
        if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+               /*
+                * if both peers support WIDER_BW, we can expand the chandef to
+                * a wider compatible one, up to 80MHz
+                */
+               if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+                       ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
+
                pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
                ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
-                                           &sdata->vif.bss_conf.chandef);
+                                           &sta->tdls_chandef);
        }
 
-       rcu_read_unlock();
+       mutex_unlock(&local->sta_mtx);
 
        /* add any remaining IEs */
        if (extra_ies_len) {
@@ -784,7 +862,7 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
                               max(sizeof(struct ieee80211_mgmt),
                                   sizeof(struct ieee80211_tdls_data)) +
                               50 + /* supported rates */
-                              7 + /* ext capab */
+                              10 + /* ext capab */
                               26 + /* max(WMM-info, WMM-param) */
                               2 + max(sizeof(struct ieee80211_ht_cap),
                                       sizeof(struct ieee80211_ht_operation)) +
@@ -983,8 +1061,17 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
+       enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
        int ret;
 
+       /* don't support setup with forced SMPS mode that's not off */
+       if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
+           smps_mode != IEEE80211_SMPS_OFF) {
+               tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
+                        smps_mode);
+               return -ENOTSUPP;
+       }
+
        mutex_lock(&local->mtx);
 
        /* we don't support concurrent TDLS peer setups */
@@ -1146,6 +1233,22 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
        return ret;
 }
 
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *ctx;
+
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (conf) {
+               ctx = container_of(conf, struct ieee80211_chanctx, conf);
+               ieee80211_recalc_chanctx_chantype(local, ctx);
+       }
+       mutex_unlock(&local->chanctx_mtx);
+}
+
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, enum nl80211_tdls_operation oper)
 {
@@ -1182,6 +1285,8 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        break;
                }
 
+               iee80211_tdls_recalc_chanctx(sdata);
+
                rcu_read_lock();
                sta = sta_info_get(sdata, peer);
                if (!sta) {
@@ -1213,6 +1318,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_flush_queues(local, sdata, false);
 
                ret = sta_info_destroy_addr(sdata, peer);
+               iee80211_tdls_recalc_chanctx(sdata);
                break;
        default:
                ret = -ENOTSUPP;
@@ -1224,6 +1330,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                eth_zero_addr(sdata->u.mgd.tdls_peer);
        }
 
+       if (ret == 0)
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &sdata->u.mgd.request_smps_work);
+
        mutex_unlock(&local->mtx);
        return ret;
 }
@@ -1627,6 +1737,31 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
                return -EINVAL;
        }
 
+       if (!elems.sec_chan_offs) {
+               chan_type = NL80211_CHAN_HT20;
+       } else {
+               switch (elems.sec_chan_offs->sec_chan_offs) {
+               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                       chan_type = NL80211_CHAN_HT40PLUS;
+                       break;
+               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                       chan_type = NL80211_CHAN_HT40MINUS;
+                       break;
+               default:
+                       chan_type = NL80211_CHAN_HT20;
+                       break;
+               }
+       }
+
+       cfg80211_chandef_create(&chandef, chan, chan_type);
+
+       /* we will be active on the TDLS link */
+       if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
+                                          sdata->wdev.iftype)) {
+               tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
+               return -EINVAL;
+       }
+
        mutex_lock(&local->sta_mtx);
        sta = sta_info_get(sdata, tf->sa);
        if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
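
Building the chandef before the station lookup lets the regulatory check (cfg80211_reg_can_beacon_relax) veto a forbidden target channel before any state is touched. The offset-to-channel-type mapping itself is mechanical; a hedged sketch using the standard 802.11 secondary-offset field values, with generic names:

    /* Secondary-channel-offset field to HT channel type; 1 and 3 are
     * the IEEE80211_HT_PARAM_CHA_SEC_ABOVE/_BELOW field values from
     * the HT operation element. Illustration only.
     */
    enum chan_type { HT20, HT40PLUS, HT40MINUS };

    static enum chan_type sec_offs_to_chan_type(int have_ie, int sec_chan_offs)
    {
            if (!have_ie)
                    return HT20;            /* no secondary-offset element */

            switch (sec_chan_offs) {
            case 1:                         /* secondary 20 MHz above */
                    return HT40PLUS;
            case 3:                         /* secondary 20 MHz below */
                    return HT40MINUS;
            default:                        /* none or reserved */
                    return HT20;
            }
    }

    int main(void)
    {
            return sec_offs_to_chan_type(1, 1) == HT40PLUS ? 0 : 1;
    }
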
@@ -1647,27 +1782,15 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       if (!sta->sta.ht_cap.ht_supported) {
-               chan_type = NL80211_CHAN_NO_HT;
-       } else if (!elems.sec_chan_offs) {
-               chan_type = NL80211_CHAN_HT20;
-       } else {
-               switch (elems.sec_chan_offs->sec_chan_offs) {
-               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                       chan_type = NL80211_CHAN_HT40PLUS;
-                       break;
-               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                       chan_type = NL80211_CHAN_HT40MINUS;
-                       break;
-               default:
-                       chan_type = NL80211_CHAN_HT20;
-                       break;
-               }
+       /* peer should have known better */
+       if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs &&
+           elems.sec_chan_offs->sec_chan_offs) {
+               tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
+               ret = -ENOTSUPP;
+               goto out;
        }
 
-       cfg80211_chandef_create(&chandef, chan, chan_type);
        params.chandef = &chandef;
-
        params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
        params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
 
@@ -1691,12 +1814,15 @@ out:
        return ret;
 }
 
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-                                          struct sk_buff *skb)
+static void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+                                     struct sk_buff *skb)
 {
        struct ieee80211_tdls_data *tf = (void *)skb->data;
        struct wiphy *wiphy = sdata->local->hw.wiphy;
 
+       ASSERT_RTNL();
+
        /* make sure the driver supports it */
        if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
                return;
@@ -1720,3 +1846,47 @@ void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
                return;
        }
 }
+
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+       u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       continue;
+
+               ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+                                           NL80211_TDLS_TEARDOWN, reason,
+                                           GFP_ATOMIC);
+       }
+       rcu_read_unlock();
+}
+
+void ieee80211_tdls_chsw_work(struct work_struct *wk)
+{
+       struct ieee80211_local *local =
+               container_of(wk, struct ieee80211_local, tdls_chsw_work);
+       struct ieee80211_sub_if_data *sdata;
+       struct sk_buff *skb;
+       struct ieee80211_tdls_data *tf;
+
+       rtnl_lock();
+       while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
+               tf = (struct ieee80211_tdls_data *)skb->data;
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(sdata) ||
+                           sdata->vif.type != NL80211_IFTYPE_STATION ||
+                           !ether_addr_equal(tf->da, sdata->vif.addr))
+                               continue;
+
+                       ieee80211_process_tdls_channel_switch(sdata, skb);
+                       break;
+               }
+
+               kfree_skb(skb);
+       }
+       rtnl_unlock();
+}
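
The worker drains the shared queue under a single rtnl_lock(), matches each frame's destination address against the running station interfaces, and frees the skb whether or not any interface claimed it. A compact userspace sketch of that drain-and-dispatch shape, with all names hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct frame { struct frame *next; unsigned char da[6]; };
    struct iface { struct iface *next; unsigned char addr[6]; bool running; };

    static struct frame *dequeue(struct frame **q)  /* cf. skb_dequeue() */
    {
            struct frame *f = *q;

            if (f)
                    *q = f->next;
            return f;
    }

    static void process(struct iface *i, struct frame *f) { (void)i; (void)f; }
    static void frame_free(struct frame *f) { (void)f; }

    static void chsw_work(struct frame **queue, struct iface *ifaces)
    {
            struct frame *f;
            struct iface *it;

            /* the kernel version holds the RTNL across the whole drain */
            while ((f = dequeue(queue))) {
                    for (it = ifaces; it; it = it->next) {
                            if (!it->running || memcmp(f->da, it->addr, 6))
                                    continue;
                            process(it, f); /* first matching iface wins */
                            break;
                    }
                    frame_free(f);          /* freed matched or not */
            }
    }

    int main(void)
    {
            struct frame *q = NULL;

            chsw_work(&q, NULL);            /* empty queue: no-op */
            return 0;
    }
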
index b8233505bf9fd3bb4945c126eebee069f2b94878..84e0e8c7fb236952dfc1cfcb23204623e80d7867 100644 (file)
@@ -311,9 +311,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
        if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
                return TX_CONTINUE;
 
-       if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-               return TX_CONTINUE;
-
        if (tx->flags & IEEE80211_TX_PS_BUFFERED)
                return TX_CONTINUE;
 
@@ -610,7 +607,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
        if (tx->key) {
                bool skip_hw = false;
 
-               tx->key->tx_rx_count++;
                /* TODO: add threshold stuff again */
 
                switch (tx->key->conf.cipher) {
@@ -690,7 +686,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-                   tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
+                   tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+                   tx->sdata->vif.type == NL80211_IFTYPE_OCB);
 
        /* set up RTS protection if desired */
        if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -2777,7 +2774,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
                sdata->sequence_number += 0x10;
        }
 
-       sta->tx_msdu[tid]++;
+       if (skb_shinfo(skb)->gso_size)
+               sta->tx_msdu[tid] +=
+                       DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+       else
+               sta->tx_msdu[tid]++;
 
        info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
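A GSO skb is one oversized payload that will be segmented into gso_size-byte MSDUs further down the stack, so the counter is advanced by the rounded-up segment count rather than by one. Worked numbers, using the kernel's DIV_ROUND_UP definition; the sample lengths are hypothetical:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int skb_len = 65226;   /* hypothetical GSO super-frame */
            unsigned int gso_size = 1448;   /* typical MSS-sized segment */

            /* 65226 / 1448 = 45.04..., so the frame counts as 46 MSDUs */
            printf("segments = %u\n", DIV_ROUND_UP(skb_len, gso_size));
            return 0;
    }
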
@@ -3213,6 +3214,16 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
        rcu_read_unlock();
 }
 
+static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+{
+       beacon->csa_current_counter--;
+
+       /* the counter should never reach 0 */
+       WARN_ON_ONCE(!beacon->csa_current_counter);
+
+       return beacon->csa_current_counter;
+}
+
 u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
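
Factoring the decrement into __ieee80211_csa_update_counter() lets the beacon-build paths below, which already hold the beacon pointer under RCU, skip the public wrapper's own lookup. A generic sketch of that wrapper/leaf split, with invented names:

    #include <assert.h>
    #include <stdio.h>

    /* Leaf helper operates on an object the caller already resolved,
     * so hot paths avoid repeating the lookup done by the wrapper.
     * Generic names, not the kernel API.
     */
    struct beacon {
            unsigned char csa_current_counter;
    };

    static unsigned char csa_counter_dec(struct beacon *b)
    {
            b->csa_current_counter--;
            assert(b->csa_current_counter); /* must never hit zero */
            return b->csa_current_counter;
    }

    int main(void)
    {
            struct beacon b = { .csa_current_counter = 3 };

            /* beacon path: already holds &b, calls the helper directly */
            printf("count = %u\n", csa_counter_dec(&b));    /* 2 */
            printf("count = %u\n", csa_counter_dec(&b));    /* 1 */
            return 0;
    }
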
@@ -3231,11 +3242,7 @@ u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
        if (!beacon)
                goto unlock;
 
-       beacon->csa_current_counter--;
-
-       /* the counter should never reach 0 */
-       WARN_ON_ONCE(!beacon->csa_current_counter);
-       count = beacon->csa_current_counter;
+       count = __ieee80211_csa_update_counter(beacon);
 
 unlock:
        rcu_read_unlock();
@@ -3335,7 +3342,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                if (beacon) {
                        if (beacon->csa_counter_offsets[0]) {
                                if (!is_template)
-                                       ieee80211_csa_update_counter(vif);
+                                       __ieee80211_csa_update_counter(beacon);
 
                                ieee80211_set_csa(sdata, beacon);
                        }
@@ -3381,7 +3388,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
 
                if (beacon->csa_counter_offsets[0]) {
                        if (!is_template)
-                               ieee80211_csa_update_counter(vif);
+                               __ieee80211_csa_update_counter(beacon);
 
                        ieee80211_set_csa(sdata, beacon);
                }
@@ -3411,7 +3418,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                                 * for now we leave it consistent with overall
                                 * mac80211's behavior.
                                 */
-                               ieee80211_csa_update_counter(vif);
+                               __ieee80211_csa_update_counter(beacon);
 
                        ieee80211_set_csa(sdata, beacon);
                }
index 43e5aadd7a894f04b7b2d7cf4b3d456ff7228400..1104421bc525598aae491c587f29eb4b385fc220 100644 (file)
@@ -47,55 +47,6 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
 
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-                       enum nl80211_iftype type)
-{
-       __le16 fc = hdr->frame_control;
-
-        /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
-       if (len < 16)
-               return NULL;
-
-       if (ieee80211_is_data(fc)) {
-               if (len < 24) /* drop incorrect hdr len (data) */
-                       return NULL;
-
-               if (ieee80211_has_a4(fc))
-                       return NULL;
-               if (ieee80211_has_tods(fc))
-                       return hdr->addr1;
-               if (ieee80211_has_fromds(fc))
-                       return hdr->addr2;
-
-               return hdr->addr3;
-       }
-
-       if (ieee80211_is_mgmt(fc)) {
-               if (len < 24) /* drop incorrect hdr len (mgmt) */
-                       return NULL;
-               return hdr->addr3;
-       }
-
-       if (ieee80211_is_ctl(fc)) {
-               if (ieee80211_is_pspoll(fc))
-                       return hdr->addr1;
-
-               if (ieee80211_is_back_req(fc)) {
-                       switch (type) {
-                       case NL80211_IFTYPE_STATION:
-                               return hdr->addr2;
-                       case NL80211_IFTYPE_AP:
-                       case NL80211_IFTYPE_AP_VLAN:
-                               return hdr->addr1;
-                       default:
-                               break; /* fall through to the return */
-                       }
-               }
-       }
-
-       return NULL;
-}
-
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 {
        struct sk_buff *skb;
@@ -752,7 +703,12 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
 
 struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_sub_if_data *sdata;
+
+       if (!vif)
+               return NULL;
+
+       sdata = vif_to_sdata(vif);
 
        if (!ieee80211_sdata_running(sdata) ||
            !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
@@ -1709,6 +1665,7 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
        local->resuming = false;
        local->suspended = false;
        local->started = false;
+       local->in_reconfig = false;
 
        /* scheduled scan clearly can't be running any more, but tell
         * cfg80211 and clear local state
@@ -1759,16 +1716,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        struct ieee80211_sub_if_data *sched_scan_sdata;
        struct cfg80211_sched_scan_request *sched_scan_req;
        bool sched_scan_stopped = false;
+       bool suspended = local->suspended;
 
        /* nothing to do if HW shouldn't run */
        if (!local->open_count)
                goto wake_up;
 
 #ifdef CONFIG_PM
-       if (local->suspended)
+       if (suspended)
                local->resuming = true;
 
        if (local->wowlan) {
+               /*
+                * In the wowlan case, both mac80211 and the device
+                * are functional when the resume op is called, so
+                * clear local->suspended so that the device can operate
+                * normally (e.g. pass RX frames).
+                */
+               local->suspended = false;
                res = drv_resume(local);
                local->wowlan = false;
                if (res < 0) {
@@ -1781,8 +1746,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                /*
                 * res is 1, which means the driver requested
                 * to go through a regular reset on wakeup.
+                * restore local->suspended in this case.
                 */
                reconfig_due_to_wowlan = true;
+               local->suspended = true;
        }
 #endif
 
@@ -1794,7 +1761,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         */
        res = drv_start(local);
        if (res) {
-               if (local->suspended)
+               if (suspended)
                        WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
                else
                        WARN(1, "Hardware became unavailable during restart.\n");
@@ -2088,10 +2055,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         * If this is for hw restart things are still running.
         * We may want to change that later, however.
         */
-       if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
+       if (local->open_count && (!suspended || reconfig_due_to_wowlan))
                drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
 
-       if (!local->suspended)
+       if (!suspended)
                return 0;
 
 #ifdef CONFIG_PM
index 80694d55db7404079212761ff23083bb9aa35169..834ccdbc74be1ccd518aaa952ad854810d94f4ca 100644 (file)
@@ -308,11 +308,15 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        enum ieee80211_sta_rx_bandwidth bw;
+       enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
 
-       bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
-       bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+       bw = ieee80211_sta_cap_rx_bw(sta);
        bw = min(bw, sta->cur_max_bandwidth);
 
+       /* do not cap the BW of TDLS WIDER_BW peers by the bss */
+       if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+               bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+
        return bw;
 }
 
@@ -422,3 +426,29 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
        if (changed > 0)
                rate_control_rate_update(local, sband, sta, changed);
 }
+
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+                                    u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+       int i;
+       u16 mask, cap = le16_to_cpu(vht_cap);
+
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+               mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
+               switch (mask) {
+               case IEEE80211_VHT_MCS_SUPPORT_0_7:
+                       vht_mask[i] = 0x00FF;
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_8:
+                       vht_mask[i] = 0x01FF;
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_9:
+                       vht_mask[i] = 0x03FF;
+                       break;
+               case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+               default:
+                       vht_mask[i] = 0;
+                       break;
+               }
+       }
+}
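
Each spatial stream occupies two bits of the 16-bit VHT MCS map: 0 means MCS 0-7, 1 means MCS 0-8, 2 means MCS 0-9, and 3 means the stream is unsupported. A standalone version of the same expansion, with the constants inlined:

    #include <stdint.h>
    #include <stdio.h>

    #define VHT_NSS_MAX             8
    #define MCS_NOT_SUPPORTED       3       /* two-bit field value */

    /* Expand the packed 16-bit VHT MCS map into one bitmask per stream */
    static void vht_mask_from_cap(uint16_t cap, uint16_t mask[VHT_NSS_MAX])
    {
            int i;

            for (i = 0; i < VHT_NSS_MAX; i++) {
                    switch ((cap >> (i * 2)) & MCS_NOT_SUPPORTED) {
                    case 0: mask[i] = 0x00FF; break;        /* MCS 0-7 */
                    case 1: mask[i] = 0x01FF; break;        /* MCS 0-8 */
                    case 2: mask[i] = 0x03FF; break;        /* MCS 0-9 */
                    default: mask[i] = 0; break;            /* unsupported */
                    }
            }
    }

    int main(void)
    {
            /* hypothetical map: NSS1 supports MCS 0-9, NSS2 MCS 0-7,
             * all higher streams unsupported (0b11 fields)
             */
            uint16_t cap = 0xFFF2;
            uint16_t mask[VHT_NSS_MAX];
            int i;

            vht_mask_from_cap(cap, mask);
            for (i = 0; i < VHT_NSS_MAX; i++)
                    printf("nss%d: 0x%04X\n", i + 1, mask[i]);
            return 0;
    }
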
index 943f7606527e25b4cad0bfea2a38755f1f7f69d9..feb547dc8643ab286fa0c64f19b01e2b4766898e 100644 (file)
@@ -516,31 +516,34 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
                        return RX_DROP_UNUSABLE;
        }
 
-       ccmp_hdr2pn(pn, skb->data + hdrlen);
+       if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+               ccmp_hdr2pn(pn, skb->data + hdrlen);
 
-       queue = rx->security_idx;
+               queue = rx->security_idx;
 
-       if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
-               key->u.ccmp.replays++;
-               return RX_DROP_UNUSABLE;
-       }
+               if (memcmp(pn, key->u.ccmp.rx_pn[queue],
+                          IEEE80211_CCMP_PN_LEN) <= 0) {
+                       key->u.ccmp.replays++;
+                       return RX_DROP_UNUSABLE;
+               }
 
-       if (!(status->flag & RX_FLAG_DECRYPTED)) {
-               u8 aad[2 * AES_BLOCK_SIZE];
-               u8 b_0[AES_BLOCK_SIZE];
-               /* hardware didn't decrypt/verify MIC */
-               ccmp_special_blocks(skb, pn, b_0, aad);
+               if (!(status->flag & RX_FLAG_DECRYPTED)) {
+                       u8 aad[2 * AES_BLOCK_SIZE];
+                       u8 b_0[AES_BLOCK_SIZE];
+                       /* hardware didn't decrypt/verify MIC */
+                       ccmp_special_blocks(skb, pn, b_0, aad);
+
+                       if (ieee80211_aes_ccm_decrypt(
+                                   key->u.ccmp.tfm, b_0, aad,
+                                   skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+                                   data_len,
+                                   skb->data + skb->len - mic_len, mic_len))
+                               return RX_DROP_UNUSABLE;
+               }
 
-               if (ieee80211_aes_ccm_decrypt(
-                           key->u.ccmp.tfm, b_0, aad,
-                           skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
-                           data_len,
-                           skb->data + skb->len - mic_len, mic_len))
-                       return RX_DROP_UNUSABLE;
+               memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
        }
 
-       memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
-
        /* Remove CCMP header and MIC */
        if (pskb_trim(skb, skb->len - mic_len))
                return RX_DROP_UNUSABLE;
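
The replay check relies on the 6-byte CCMP packet number being stored big-endian, so a byte-wise memcmp() orders PNs numerically; when the device sets RX_FLAG_PN_VALIDATED, the compare, the software decrypt fallback, and the stored-PN update are all skipped. A small sketch of the comparison under that big-endian assumption:

    #include <stdio.h>
    #include <string.h>

    #define PN_LEN 6

    /* Big-endian PNs compare correctly with memcmp(): accept only a PN
     * strictly greater than the last validated one.
     */
    static int pn_is_fresh(const unsigned char pn[PN_LEN],
                           const unsigned char last_pn[PN_LEN])
    {
            return memcmp(pn, last_pn, PN_LEN) > 0;
    }

    int main(void)
    {
            unsigned char last[PN_LEN]   = { 0, 0, 0, 0, 1, 0 };    /* 0x100 */
            unsigned char replay[PN_LEN] = { 0, 0, 0, 0, 0, 255 };  /* 0x0ff */
            unsigned char fresh[PN_LEN]  = { 0, 0, 0, 0, 1, 1 };    /* 0x101 */

            printf("replay accepted: %d\n", pn_is_fresh(replay, last)); /* 0 */
            printf("fresh accepted:  %d\n", pn_is_fresh(fresh, last));  /* 1 */
            return 0;
    }
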
@@ -739,31 +742,35 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
                        return RX_DROP_UNUSABLE;
        }
 
-       gcmp_hdr2pn(pn, skb->data + hdrlen);
+       if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+               gcmp_hdr2pn(pn, skb->data + hdrlen);
 
-       queue = rx->security_idx;
+               queue = rx->security_idx;
 
-       if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
-               key->u.gcmp.replays++;
-               return RX_DROP_UNUSABLE;
-       }
+               if (memcmp(pn, key->u.gcmp.rx_pn[queue],
+                          IEEE80211_GCMP_PN_LEN) <= 0) {
+                       key->u.gcmp.replays++;
+                       return RX_DROP_UNUSABLE;
+               }
 
-       if (!(status->flag & RX_FLAG_DECRYPTED)) {
-               u8 aad[2 * AES_BLOCK_SIZE];
-               u8 j_0[AES_BLOCK_SIZE];
-               /* hardware didn't decrypt/verify MIC */
-               gcmp_special_blocks(skb, pn, j_0, aad);
+               if (!(status->flag & RX_FLAG_DECRYPTED)) {
+                       u8 aad[2 * AES_BLOCK_SIZE];
+                       u8 j_0[AES_BLOCK_SIZE];
+                       /* hardware didn't decrypt/verify MIC */
+                       gcmp_special_blocks(skb, pn, j_0, aad);
+
+                       if (ieee80211_aes_gcm_decrypt(
+                                   key->u.gcmp.tfm, j_0, aad,
+                                   skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+                                   data_len,
+                                   skb->data + skb->len -
+                                   IEEE80211_GCMP_MIC_LEN))
+                               return RX_DROP_UNUSABLE;
+               }
 
-               if (ieee80211_aes_gcm_decrypt(
-                           key->u.gcmp.tfm, j_0, aad,
-                           skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
-                           data_len,
-                           skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
-                       return RX_DROP_UNUSABLE;
+               memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
        }
 
-       memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
-
        /* Remove GCMP header and MIC */
        if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
                return RX_DROP_UNUSABLE;
index f7ba51e8b4cafbf720c5ee3096c1102cbf2a4438..c865ebb2ace2b0b74cf3ba994c085790cde83da6 100644 (file)
@@ -209,10 +209,6 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->min_be == min_be &&
-           wpan_dev->max_be == max_be)
-               return 0;
-
        wpan_dev->min_be = min_be;
        wpan_dev->max_be = max_be;
        return 0;
@@ -224,9 +220,6 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->short_addr == short_addr)
-               return 0;
-
        wpan_dev->short_addr = short_addr;
        return 0;
 }
@@ -238,9 +231,6 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->csma_retries == max_csma_backoffs)
-               return 0;
-
        wpan_dev->csma_retries = max_csma_backoffs;
        return 0;
 }
@@ -252,9 +242,6 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->frame_retries == max_frame_retries)
-               return 0;
-
        wpan_dev->frame_retries = max_frame_retries;
        return 0;
 }
@@ -265,13 +252,20 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
        ASSERT_RTNL();
 
-       if (wpan_dev->lbt == mode)
-               return 0;
-
        wpan_dev->lbt = mode;
        return 0;
 }
 
+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+                             struct wpan_dev *wpan_dev, bool ackreq)
+{
+       ASSERT_RTNL();
+
+       wpan_dev->ackreq = ackreq;
+       return 0;
+}
+
 const struct cfg802154_ops mac802154_config_ops = {
        .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
        .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
@@ -289,4 +283,5 @@ const struct cfg802154_ops mac802154_config_ops = {
        .set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
        .set_max_frame_retries = ieee802154_set_max_frame_retries,
        .set_lbt_mode = ieee802154_set_lbt_mode,
+       .set_ackreq_default = ieee802154_set_ackreq_default,
 };
index 416de903e46757cfead3fe54106efa07ce6e6245..ed26952f9e143407723dca1ef45d8400424aca3e 100644 (file)
@@ -125,6 +125,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
        if (netif_running(dev))
                return -EBUSY;
 
+       /* the lowpan interface needs to be down to update
+        * the SLAAC address after ifup
+        */
+       if (sdata->wpan_dev.lowpan_dev) {
+               if (netif_running(sdata->wpan_dev.lowpan_dev))
+                       return -EBUSY;
+       }
+
        ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
        if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
                return -EINVAL;
@@ -132,6 +140,13 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        sdata->wpan_dev.extended_addr = extended_addr;
 
+       /* update the lowpan interface MAC address when
+        * the wpan MAC address has changed
+        */
+       if (sdata->wpan_dev.lowpan_dev)
+               memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+                      dev->addr_len);
+
        return mac802154_wpan_update_llsec(dev);
 }
 
@@ -483,8 +498,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
        wpan_dev->min_be = 3;
        wpan_dev->max_be = 5;
        wpan_dev->csma_retries = 4;
-       /* for compatibility, actual default is 3 */
-       wpan_dev->frame_retries = -1;
+       wpan_dev->frame_retries = 3;
 
        wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
index 9e55431b9a5cc0baf0c40fa9e7a96c3381617ad5..e8cab5bb80c664669f9c34b36e5e9178db83ae13 100644 (file)
@@ -111,7 +111,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
        phy->supported.max_minbe = 8;
        phy->supported.min_maxbe = 3;
        phy->supported.max_maxbe = 8;
-       phy->supported.min_frame_retries = -1;
+       phy->supported.min_frame_retries = 0;
        phy->supported.max_frame_retries = 7;
        phy->supported.max_csma_backoffs = 5;
        phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
@@ -177,11 +177,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
        }
 
        if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
-               /* TODO should be 3, but our default value is -1 which means
-                * no ARET handling.
-                */
-               local->phy->supported.min_frame_retries = -1;
-               local->phy->supported.max_frame_retries = -1;
+               local->phy->supported.min_frame_retries = 3;
+               local->phy->supported.max_frame_retries = 3;
        }
 
        if (hw->flags & IEEE802154_HW_PROMISCUOUS)
index b6b9a6c4e7849a35fa2161b7eafd41315b30208b..8c5707db53c5f6e33987b7215a4b3843c60cd610 100644 (file)
 /* This maximum ha length copied from the definition of struct neighbour */
 #define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
 
+enum mpls_payload_type {
+       MPT_UNSPEC, /* IPv4 or IPv6 */
+       MPT_IPV4 = 4,
+       MPT_IPV6 = 6,
+
+       /* Other types not implemented:
+        *  - Pseudo-wire with or without control word (RFC4385)
+        *  - GAL (RFC5586)
+        */
+};
+
 struct mpls_route { /* next hop label forwarding entry */
        struct net_device __rcu *rt_dev;
        struct rcu_head         rt_rcu;
        u32                     rt_label[MAX_NEW_LABELS];
        u8                      rt_protocol; /* routing protocol that set this entry */
+       u8                      rt_payload_type;
        u8                      rt_labels;
        u8                      rt_via_alen;
        u8                      rt_via_table;
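
rt_payload_type lets a route pin its egress payload to IPv4 or IPv6, while MPT_UNSPEC keeps the old per-packet behavior of trusting the version nibble of the inner header; the enum values 4 and 6 deliberately match the IP version numbers to make that fallback a direct assignment. A hedged sketch of the resolution, with an invented helper name:

    #include <stdint.h>
    #include <stdio.h>

    enum mpls_payload_type {
            MPT_UNSPEC,     /* IPv4 or IPv6, decided per packet */
            MPT_IPV4 = 4,
            MPT_IPV6 = 6,
    };

    /* Resolve the effective payload type: a pinned route type wins,
     * otherwise fall back to the inner packet's IP version nibble.
     */
    static enum mpls_payload_type
    effective_payload(enum mpls_payload_type rt_type, const uint8_t *inner)
    {
            if (rt_type != MPT_UNSPEC)
                    return rt_type;
            return inner[0] >> 4;   /* IP version: 4 or 6 */
    }

    int main(void)
    {
            uint8_t v4_pkt[] = { 0x45 };    /* version 4, IHL 5 */
            uint8_t v6_pkt[] = { 0x60 };    /* version 6 */

            printf("%d\n", effective_payload(MPT_UNSPEC, v4_pkt));  /* 4 */
            printf("%d\n", effective_payload(MPT_IPV4, v6_pkt));    /* 4: pinned */
            return 0;
    }
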
@@ -96,16 +108,8 @@ EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
                        struct mpls_entry_decoded dec)
 {
-       /* RFC4385 and RFC5586 encode other packets in mpls such that
-        * they don't conflict with the ip version number, making
-        * decoding by examining the ip version correct in everything
-        * except for the strangest cases.
-        *
-        * The strange cases if we choose to support them will require
-        * manual configuration.
-        */
-       struct iphdr *hdr4;
-       bool success = true;
+       enum mpls_payload_type payload_type;
+       bool success = false;
 
        /* The IPv4 code below accesses through the IPv4 header
         * checksum, which is 12 bytes into the packet.
@@ -120,23 +124,32 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
        if (!pskb_may_pull(skb, 12))
                return false;
 
-       /* Use ip_hdr to find the ip protocol version */
-       hdr4 = ip_hdr(skb);
-       if (hdr4->version == 4) {
+       payload_type = rt->rt_payload_type;
+       if (payload_type == MPT_UNSPEC)
+               payload_type = ip_hdr(skb)->version;
+
+       switch (payload_type) {
+       case MPT_IPV4: {
+               struct iphdr *hdr4 = ip_hdr(skb);
                skb->protocol = htons(ETH_P_IP);
                csum_replace2(&hdr4->check,
                              htons(hdr4->ttl << 8),
                              htons(dec.ttl << 8));
                hdr4->ttl = dec.ttl;
+               success = true;
+               break;
        }
-       else if (hdr4->version == 6) {
+       case MPT_IPV6: {
                struct ipv6hdr *hdr6 = ipv6_hdr(skb);
                skb->protocol = htons(ETH_P_IPV6);
                hdr6->hop_limit = dec.ttl;
+               success = true;
+               break;
+       }
+       case MPT_UNSPEC:
+               break;
        }
-       else
-               /* version 0 and version 1 are used by pseudo wires */
-               success = false;
+
        return success;
 }
 
@@ -255,16 +268,17 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
 };
 
 struct mpls_route_config {
-       u32             rc_protocol;
-       u32             rc_ifindex;
-       u16             rc_via_table;
-       u16             rc_via_alen;
-       u8              rc_via[MAX_VIA_ALEN];
-       u32             rc_label;
-       u32             rc_output_labels;
-       u32             rc_output_label[MAX_NEW_LABELS];
-       u32             rc_nlflags;
-       struct nl_info  rc_nlinfo;
+       u32                     rc_protocol;
+       u32                     rc_ifindex;
+       u16                     rc_via_table;
+       u16                     rc_via_alen;
+       u8                      rc_via[MAX_VIA_ALEN];
+       u32                     rc_label;
+       u32                     rc_output_labels;
+       u32                     rc_output_label[MAX_NEW_LABELS];
+       u32                     rc_nlflags;
+       enum mpls_payload_type  rc_payload_type;
+       struct nl_info          rc_nlinfo;
 };
 
 static struct mpls_route *mpls_rt_alloc(size_t alen)
@@ -338,14 +352,14 @@ static unsigned find_free_label(struct net *net)
 #if IS_ENABLED(CONFIG_INET)
 static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
 {
-       struct net_device *dev = NULL;
+       struct net_device *dev;
        struct rtable *rt;
        struct in_addr daddr;
 
        memcpy(&daddr, addr, sizeof(struct in_addr));
        rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
        if (IS_ERR(rt))
-               goto errout;
+               return ERR_CAST(rt);
 
        dev = rt->dst.dev;
        dev_hold(dev);
@@ -353,8 +367,6 @@ static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
        ip_rt_put(rt);
 
        return dev;
-errout:
-       return ERR_PTR(-ENODEV);
 }
 #else
 static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
@@ -366,7 +378,7 @@ static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
 #if IS_ENABLED(CONFIG_IPV6)
 static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
 {
-       struct net_device *dev = NULL;
+       struct net_device *dev;
        struct dst_entry *dst;
        struct flowi6 fl6;
        int err;
@@ -378,16 +390,13 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
        memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
        err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
        if (err)
-               goto errout;
+               return ERR_PTR(err);
 
        dev = dst->dev;
        dev_hold(dev);
        dst_release(dst);
 
        return dev;
-
-errout:
-       return ERR_PTR(err);
 }
 #else
 static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
@@ -416,6 +425,9 @@ static struct net_device *find_outdev(struct net *net,
                dev = dev_get_by_index(net, cfg->rc_ifindex);
        }
 
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
        return dev;
 }
 
@@ -495,6 +507,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
                rt->rt_label[i] = cfg->rc_output_label[i];
        rt->rt_protocol = cfg->rc_protocol;
        RCU_INIT_POINTER(rt->rt_dev, dev);
+       rt->rt_payload_type = cfg->rc_payload_type;
        rt->rt_via_table = cfg->rc_via_table;
        memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
 
@@ -1049,6 +1062,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                        goto nort0;
                RCU_INIT_POINTER(rt0->rt_dev, lo);
                rt0->rt_protocol = RTPROT_KERNEL;
+               rt0->rt_payload_type = MPT_IPV4;
                rt0->rt_via_table = NEIGH_LINK_TABLE;
                memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
        }
@@ -1059,6 +1073,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                        goto nort2;
                RCU_INIT_POINTER(rt2->rt_dev, lo);
                rt2->rt_protocol = RTPROT_KERNEL;
+               rt2->rt_payload_type = MPT_IPV6;
                rt2->rt_via_table = NEIGH_LINK_TABLE;
                memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
        }
index 276f8c9922184ada1e505cd31cf028552906f217..21e70bc9af989355521f8fd99ea3bc07730626c8 100644 (file)
@@ -48,7 +48,6 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = NULL;
        struct rt6_info *rt6 = NULL;
-       struct lwtunnel_state *lwtstate = NULL;
        int err = 0;
        bool bos;
        int i;
@@ -58,11 +57,9 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
        if (skb->protocol == htons(ETH_P_IP)) {
                ttl = ip_hdr(skb)->ttl;
                rt = (struct rtable *)dst;
-               lwtstate = rt->rt_lwtstate;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                ttl = ipv6_hdr(skb)->hop_limit;
                rt6 = (struct rt6_info *)dst;
-               lwtstate = rt6->rt6i_lwtstate;
        } else {
                goto drop;
        }
@@ -72,12 +69,12 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
        /* Find the output device */
        out_dev = dst->dev;
        if (!mpls_output_possible(out_dev) ||
-           !lwtstate || skb_warn_if_lro(skb))
+           !dst->lwtstate || skb_warn_if_lro(skb))
                goto drop;
 
        skb_forward_csum(skb);
 
-       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+       tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
 
        /* Verify the destination can hold the packet */
        new_header_size = mpls_encap_size(tun_encap_info);
@@ -126,6 +123,7 @@ drop:
 }
 
 static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+                           unsigned int family, const void *cfg,
                            struct lwtunnel_state **ts)
 {
        struct mpls_iptunnel_encap *tun_encap_info;
index 6eae69a698ed8344fee68a46e767a2393e53148a..3e1b4abf1897a5bdeca9e5fa061bd06d9b858263 100644 (file)
@@ -867,6 +867,8 @@ config NETFILTER_XT_TARGET_TEE
        depends on NETFILTER_ADVANCED
        depends on IPV6 || IPV6=n
        depends on !NF_CONNTRACK || NF_CONNTRACK
+       select NF_DUP_IPV4
+       select NF_DUP_IPV6 if IP6_NF_IPTABLES
        ---help---
        This option adds a "TEE" target with which a packet can be cloned and
        this clone be rerouted to another nexthop.
index 5882bbfd198c24b9e72d1d6ddc05f21ad38f53bc..136184572fc9d274a5ef9493852c57045119ce5c 100644 (file)
@@ -274,7 +274,7 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
                " for conn " FMT_CONN "\n",
                __func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-       h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+       h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
                                  &tuple);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
index 651039ad1681db0434cff21275f1bcbe3f8464bc..ac3be9b0629b7aee7743d414bbffc994ecc9704f 100644 (file)
@@ -126,7 +126,7 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
        unsigned int n;
 
@@ -135,7 +135,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-       return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+       return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
 }
@@ -151,15 +151,15 @@ static u32 hash_bucket(u32 hash, const struct net *net)
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-                                 u16 zone, unsigned int size)
+                                 unsigned int size)
 {
-       return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+       return __hash_bucket(hash_conntrack_raw(tuple), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
                                       const struct nf_conntrack_tuple *tuple)
 {
-       return __hash_conntrack(tuple, zone, net->ct.htable_size);
+       return __hash_conntrack(tuple, net->ct.htable_size);
 }
 
 bool
@@ -288,35 +288,28 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 }
 
 /* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+                                const struct nf_conntrack_zone *zone,
+                                gfp_t flags)
 {
        struct nf_conn *tmpl;
 
-       tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+       tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;
 
        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
 
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       if (zone) {
-               struct nf_conntrack_zone *nf_ct_zone;
+       if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+               goto out_free;
 
-               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
-               if (!nf_ct_zone)
-                       goto out_free;
-               nf_ct_zone->id = zone;
-       }
-#endif
        atomic_set(&tmpl->ct_general.use, 0);
 
        return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
        kfree(tmpl);
        return NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
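
Besides the pointer-based zone argument, two behavioural details of the
rewritten allocator are visible above: it honours the caller's gfp flags
instead of hard-coding GFP_KERNEL, and it reports failure as NULL rather
than an ERR_PTR() (the synproxy hunk further down adjusts its caller to
match). A sketch of the resulting calling convention:

        struct nf_conn *tmpl;

        tmpl = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
        if (!tmpl)      /* NULL on failure, no IS_ERR() check needed */
                return -ENOMEM;
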
 
@@ -373,7 +366,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
-       u16 zone = nf_ct_zone(ct);
        unsigned int sequence;
 
        nf_ct_helper_destroy(ct);
@@ -381,9 +373,9 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
-               hash = hash_conntrack(net, zone,
+               hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -431,8 +423,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
-                       const struct nf_conntrack_tuple *tuple,
-                       u16 zone)
+               const struct nf_conntrack_tuple *tuple,
+               const struct nf_conntrack_zone *zone)
 {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -440,8 +432,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
-               nf_ct_zone(ct) == zone &&
-               nf_ct_is_confirmed(ct);
+              nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+              nf_ct_is_confirmed(ct);
 }
 
 /*
@@ -450,7 +442,7 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  */
 static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
@@ -486,7 +478,7 @@ begin:
 
 /* Find a connection corresponding to a tuple. */
 static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
 {
        struct nf_conntrack_tuple_hash *h;
@@ -513,11 +505,11 @@ begin:
 }
 
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
        return __nf_conntrack_find_get(net, zone, tuple,
-                                      hash_conntrack_raw(tuple, zone));
+                                      hash_conntrack_raw(tuple));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -536,11 +528,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
-       u16 zone;
        unsigned int sequence;
 
        zone = nf_ct_zone(ct);
@@ -548,9 +540,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
-               hash = hash_conntrack(net, zone,
+               hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -558,12 +550,14 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
 
        add_timer(&ct->timeout);
@@ -588,6 +582,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+       const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
@@ -596,7 +591,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
-       u16 zone;
        unsigned int sequence;
 
        ct = nf_ct_get(skb, &ctinfo);
@@ -617,7 +611,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
-               reply_hash = hash_conntrack(net, zone,
+               reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -649,12 +643,14 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
-                   zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+                   nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+                                    NF_CT_DIRECTION(h)))
                        goto out;
 
        /* Timer relative to confirmation time, not original
@@ -707,11 +703,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
 {
        struct net *net = nf_ct_net(ignored_conntrack);
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
-       u16 zone = nf_ct_zone(ignored_conntrack);
-       unsigned int hash = hash_conntrack(net, zone, tuple);
+       unsigned int hash;
+
+       zone = nf_ct_zone(ignored_conntrack);
+       hash = hash_conntrack(net, tuple);
 
        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
@@ -721,7 +720,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
-                   nf_ct_zone(ct) == zone) {
+                   nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
@@ -810,7 +809,8 @@ void init_nf_conntrack_hash_rnd(void)
 }
 
 static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+                    const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
@@ -820,7 +820,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
-               hash = hash_conntrack_raw(orig, zone);
+               hash = hash_conntrack_raw(orig);
        }
 
        /* We don't want any race condition at early drop stage */
@@ -840,10 +840,9 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
-       if (ct == NULL) {
-               atomic_dec(&net->ct.count);
-               return ERR_PTR(-ENOMEM);
-       }
+       if (ct == NULL)
+               goto out;
+
        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -857,31 +856,24 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       if (zone) {
-               struct nf_conntrack_zone *nf_ct_zone;
 
-               nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
-               if (!nf_ct_zone)
-                       goto out_free;
-               nf_ct_zone->id = zone;
-       }
-#endif
+       if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+               goto out_free;
+
        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
-       atomic_dec(&net->ct.count);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+       atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
-#endif
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+                                  const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
@@ -923,8 +915,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+       const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
+       struct nf_conntrack_zone tmp;
        unsigned int *timeouts;
 
        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
@@ -932,6 +925,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                return NULL;
        }
 
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
@@ -1026,10 +1020,11 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
 {
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
+       struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
-       u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        u32 hash;
 
        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1040,7 +1035,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
        }
 
        /* look for tuple match */
-       hash = hash_conntrack_raw(&tuple, zone);
+       zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+       hash = hash_conntrack_raw(&tuple);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1290,6 +1286,13 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
 }
 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
 
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+       .id     = NF_CT_DEFAULT_ZONE_ID,
+       .dir    = NF_CT_DEFAULT_ZONE_DIR,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_zone),
@@ -1544,10 +1547,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
-       if (!hash) {
-               printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+       if (!hash)
                hash = vzalloc(sz);
-       }
 
        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
@@ -1598,8 +1599,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
                                        struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
-                       bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-                                                 hashsize);
+                       bucket = __hash_conntrack(&h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
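
The recurring substitution in this file replaces the scalar comparison
nf_ct_zone(ct) == zone with nf_ct_zone_equal(), which also takes the tuple
direction into account so that a zone can be restricted to the original or
the reply direction. Roughly, and with hypothetical helper names (the real
helpers live in nf_conntrack_zones.h and use a direction bitmask):

        static u16 zone_id_for_dir(const struct nf_conntrack_zone *z, int dir)
        {
                /* a direction-restricted zone only applies to its own
                 * direction; anything else falls back to the default ID
                 */
                if (z->dir == NF_CT_DEFAULT_ZONE_DIR || z->dir == dir)
                        return z->id;
                return NF_CT_DEFAULT_ZONE_ID;
        }

        static bool zones_equal(const struct nf_conn *ct,
                                const struct nf_conntrack_zone *z, int dir)
        {
                return zone_id_for_dir(nf_ct_zone(ct), dir) ==
                       zone_id_for_dir(z, dir);
        }
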
index b45a4223cb058a47ae2863a4166cd5085e587ca6..acf5c7b3f378c600ec983a0b92e7eb935c56b0c8 100644 (file)
@@ -88,7 +88,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
 }
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+                   const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i;
@@ -100,7 +101,7 @@ __nf_ct_expect_find(struct net *net, u16 zone,
        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                   nf_ct_zone(i->master) == zone)
+                   nf_ct_zone_equal_any(i->master, zone))
                        return i;
        }
        return NULL;
@@ -109,7 +110,8 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
 
 /* Just find a expectation corresponding to a tuple. */
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+                     const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i;
@@ -127,7 +129,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
 /* If an expectation for this connection is found, it gets deleted from
  * the global list and then returned. */
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+                      const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i, *exp = NULL;
@@ -140,7 +143,7 @@ nf_ct_find_expectation(struct net *net, u16 zone,
        hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-                   nf_ct_zone(i->master) == zone) {
+                   nf_ct_zone_equal_any(i->master, zone)) {
                        exp = i;
                        break;
                }
@@ -220,16 +223,16 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
        }
 
        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
-              nf_ct_zone(a->master) == nf_ct_zone(b->master);
+              nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
 {
        return a->master == b->master && a->class == b->class &&
-               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
-               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
-               nf_ct_zone(a->master) == nf_ct_zone(b->master);
+              nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+              nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+              nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 /* Generally a bad idea to call this: could have matched already. */
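
Expectations carry no per-direction tuple hash, which is presumably why the
lookups above use nf_ct_zone_equal_any() rather than the direction-aware
variant. A sketch of the assumed semantics (body hypothetical):

        static bool zone_equal_any(const struct nf_conn *master,
                                   const struct nf_conntrack_zone *z)
        {
                /* directionless: only the zone IDs are compared */
                return nf_ct_zone(master)->id == z->id;
        }
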
index 6b8b0abbfab482280ae6a318f8bc58260e0b21c8..94a66541e0b76a1764ad6b2d73bf8f6cd1310a6b 100644 (file)
@@ -127,6 +127,20 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
        return ret;
 }
 
+static inline int
+ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+                      const struct nf_conntrack_zone *zone, int dir)
+{
+       if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
+               return 0;
+       if (nla_put_be16(skb, attrtype, htons(zone->id)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
 static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
@@ -458,6 +472,7 @@ static int
 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                    struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nlattr *nest_parms;
@@ -473,11 +488,16 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
        nfmsg->version      = NFNETLINK_V0;
        nfmsg->res_id       = 0;
 
+       zone = nf_ct_zone(ct);
+
        nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_ORIG) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
        nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -485,10 +505,13 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_REPL) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct) &&
-           nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+       if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+                                  NF_CT_DEFAULT_ZONE_DIR) < 0)
                goto nla_put_failure;
 
        if (ctnetlink_dump_status(skb, ct) < 0 ||
@@ -598,7 +621,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               + ctnetlink_label_size(ct)
@@ -609,6 +632,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
+       const struct nf_conntrack_zone *zone;
        struct net *net;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
@@ -655,11 +679,16 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
        nfmsg->res_id   = 0;
 
        rcu_read_lock();
+       zone = nf_ct_zone(ct);
+
        nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_ORIG) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
        nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -667,10 +696,13 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_REPL) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct) &&
-           nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+       if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+                                  NF_CT_DEFAULT_ZONE_DIR) < 0)
                goto nla_put_failure;
 
        if (ctnetlink_dump_id(skb, ct) < 0)
@@ -920,15 +952,54 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
        return ret;
 }
 
+static int
+ctnetlink_parse_zone(const struct nlattr *attr,
+                    struct nf_conntrack_zone *zone)
+{
+       nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
+                       NF_CT_DEFAULT_ZONE_DIR, 0);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       if (attr)
+               zone->id = ntohs(nla_get_be16(attr));
+#else
+       if (attr)
+               return -EOPNOTSUPP;
+#endif
+       return 0;
+}
+
+static int
+ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
+                          struct nf_conntrack_zone *zone)
+{
+       int ret;
+
+       if (zone->id != NF_CT_DEFAULT_ZONE_ID)
+               return -EINVAL;
+
+       ret = ctnetlink_parse_zone(attr, zone);
+       if (ret < 0)
+               return ret;
+
+       if (type == CTA_TUPLE_REPLY)
+               zone->dir = NF_CT_ZONE_DIR_REPL;
+       else
+               zone->dir = NF_CT_ZONE_DIR_ORIG;
+
+       return 0;
+}
+
 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
        [CTA_TUPLE_IP]          = { .type = NLA_NESTED },
        [CTA_TUPLE_PROTO]       = { .type = NLA_NESTED },
+       [CTA_TUPLE_ZONE]        = { .type = NLA_U16 },
 };
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
                      struct nf_conntrack_tuple *tuple,
-                     enum ctattr_type type, u_int8_t l3num)
+                     enum ctattr_type type, u_int8_t l3num,
+                     struct nf_conntrack_zone *zone)
 {
        struct nlattr *tb[CTA_TUPLE_MAX+1];
        int err;
@@ -955,6 +1026,16 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
        if (err < 0)
                return err;
 
+       if (tb[CTA_TUPLE_ZONE]) {
+               if (!zone)
+                       return -EINVAL;
+
+               err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
+                                                type, zone);
+               if (err < 0)
+                       return err;
+       }
+
        /* orig and expect tuples get DIR_ORIGINAL */
        if (type == CTA_TUPLE_REPLY)
                tuple->dst.dir = IP_CT_DIR_REPLY;
@@ -964,21 +1045,6 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
        return 0;
 }
 
-static int
-ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
-{
-       if (attr)
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-               *zone = ntohs(nla_get_be16(attr));
-#else
-               return -EOPNOTSUPP;
-#endif
-       else
-               *zone = 0;
-
-       return 0;
-}
-
 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
        [CTA_HELP_NAME]         = { .type = NLA_NUL_STRING,
                                    .len = NF_CT_HELPER_NAME_LEN - 1 },
@@ -1058,7 +1124,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conn *ct;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1066,9 +1132,11 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_TUPLE_ORIG])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+                                           u3, &zone);
        else if (cda[CTA_TUPLE_REPLY])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+                                           u3, &zone);
        else {
                return ctnetlink_flush_conntrack(net, cda,
                                                 NETLINK_CB(skb).portid,
@@ -1078,7 +1146,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       h = nf_conntrack_find_get(net, &zone, &tuple);
        if (!h)
                return -ENOENT;
 
@@ -1112,7 +1180,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct sk_buff *skb2 = NULL;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -1138,16 +1206,18 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_TUPLE_ORIG])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+                                           u3, &zone);
        else if (cda[CTA_TUPLE_REPLY])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+                                           u3, &zone);
        else
                return -EINVAL;
 
        if (err < 0)
                return err;
 
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       h = nf_conntrack_find_get(net, &zone, &tuple);
        if (!h)
                return -ENOENT;
 
@@ -1645,7 +1715,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
 }
 
 static struct nf_conn *
-ctnetlink_create_conntrack(struct net *net, u16 zone,
+ctnetlink_create_conntrack(struct net *net,
+                          const struct nf_conntrack_zone *zone,
                           const struct nlattr * const cda[],
                           struct nf_conntrack_tuple *otuple,
                           struct nf_conntrack_tuple *rtuple,
@@ -1761,7 +1832,8 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                struct nf_conntrack_tuple_hash *master_h;
                struct nf_conn *master_ct;
 
-               err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
+               err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
+                                           u3, NULL);
                if (err < 0)
                        goto err2;
 
@@ -1804,7 +1876,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_conn *ct;
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1812,21 +1884,23 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_TUPLE_ORIG]) {
-               err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
+               err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
+                                           u3, &zone);
                if (err < 0)
                        return err;
        }
 
        if (cda[CTA_TUPLE_REPLY]) {
-               err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
+               err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
+                                           u3, &zone);
                if (err < 0)
                        return err;
        }
 
        if (cda[CTA_TUPLE_ORIG])
-               h = nf_conntrack_find_get(net, zone, &otuple);
+               h = nf_conntrack_find_get(net, &zone, &otuple);
        else if (cda[CTA_TUPLE_REPLY])
-               h = nf_conntrack_find_get(net, zone, &rtuple);
+               h = nf_conntrack_find_get(net, &zone, &rtuple);
 
        if (h == NULL) {
                err = -ENOENT;
@@ -1836,7 +1910,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                        if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
                                return -EINVAL;
 
-                       ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
+                       ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
                                                        &rtuple, u3);
                        if (IS_ERR(ct))
                                return PTR_ERR(ct);
@@ -2082,7 +2156,7 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               ;
@@ -2091,14 +2165,20 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 static int
 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
+       const struct nf_conntrack_zone *zone;
        struct nlattr *nest_parms;
 
        rcu_read_lock();
+       zone = nf_ct_zone(ct);
+
        nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_ORIG) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
        nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -2106,12 +2186,14 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
                goto nla_put_failure;
        if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
                goto nla_put_failure;
+       if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+                                  NF_CT_ZONE_DIR_REPL) < 0)
+               goto nla_put_failure;
        nla_nest_end(skb, nest_parms);
 
-       if (nf_ct_zone(ct)) {
-               if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
-                       goto nla_put_failure;
-       }
+       if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+                                  NF_CT_DEFAULT_ZONE_DIR) < 0)
+               goto nla_put_failure;
 
        if (ctnetlink_dump_id(skb, ct) < 0)
                goto nla_put_failure;
@@ -2218,12 +2300,12 @@ static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
        int err;
 
        err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
-                                   nf_ct_l3num(ct));
+                                   nf_ct_l3num(ct), NULL);
        if (err < 0)
                return err;
 
        return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
-                                    nf_ct_l3num(ct));
+                                    nf_ct_l3num(ct), NULL);
 }
 
 static int
@@ -2612,23 +2694,22 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
-       u16 zone = 0;
+       struct nf_conntrack_zone zone;
        struct netlink_dump_control c = {
                .dump = ctnetlink_exp_ct_dump_table,
                .done = ctnetlink_exp_done,
        };
 
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
-       if (cda[CTA_EXPECT_ZONE]) {
-               err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
-               if (err < 0)
-                       return err;
-       }
+       err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+       if (err < 0)
+               return err;
 
-       h = nf_conntrack_find_get(net, zone, &tuple);
+       h = nf_conntrack_find_get(net, &zone, &tuple);
        if (!h)
                return -ENOENT;
 
@@ -2652,7 +2733,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        struct sk_buff *skb2;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -2672,16 +2753,18 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        if (cda[CTA_EXPECT_TUPLE])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                           u3, NULL);
        else if (cda[CTA_EXPECT_MASTER])
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+                                           u3, NULL);
        else
                return -EINVAL;
 
        if (err < 0)
                return err;
 
-       exp = nf_ct_expect_find_get(net, zone, &tuple);
+       exp = nf_ct_expect_find_get(net, &zone, &tuple);
        if (!exp)
                return -ENOENT;
 
@@ -2732,8 +2815,8 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct hlist_node *next;
        u_int8_t u3 = nfmsg->nfgen_family;
+       struct nf_conntrack_zone zone;
        unsigned int i;
-       u16 zone;
        int err;
 
        if (cda[CTA_EXPECT_TUPLE]) {
@@ -2742,12 +2825,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                if (err < 0)
                        return err;
 
-               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+               err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                           u3, NULL);
                if (err < 0)
                        return err;
 
                /* bump usage count to 2 */
-               exp = nf_ct_expect_find_get(net, zone, &tuple);
+               exp = nf_ct_expect_find_get(net, &zone, &tuple);
                if (!exp)
                        return -ENOENT;
 
@@ -2849,7 +2933,8 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
                return -EINVAL;
 
        err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
-                                       &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+                                   &nat_tuple, CTA_EXPECT_NAT_TUPLE,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
@@ -2937,7 +3022,8 @@ err_out:
 }
 
 static int
-ctnetlink_create_expect(struct net *net, u16 zone,
+ctnetlink_create_expect(struct net *net,
+                       const struct nf_conntrack_zone *zone,
                        const struct nlattr * const cda[],
                        u_int8_t u3, u32 portid, int report)
 {
@@ -2949,13 +3035,16 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        int err;
 
        /* caller guarantees that those three CTA_EXPECT_* exist */
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                   u3, NULL);
        if (err < 0)
                return err;
-       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
+                                   u3, NULL);
        if (err < 0)
                return err;
-       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
@@ -3011,7 +3100,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conntrack_expect *exp;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int8_t u3 = nfmsg->nfgen_family;
-       u16 zone;
+       struct nf_conntrack_zone zone;
        int err;
 
        if (!cda[CTA_EXPECT_TUPLE]
@@ -3023,19 +3112,18 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+                                   u3, NULL);
        if (err < 0)
                return err;
 
        spin_lock_bh(&nf_conntrack_expect_lock);
-       exp = __nf_ct_expect_find(net, zone, &tuple);
-
+       exp = __nf_ct_expect_find(net, &zone, &tuple);
        if (!exp) {
                spin_unlock_bh(&nf_conntrack_expect_lock);
                err = -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
-                       err = ctnetlink_create_expect(net, zone, cda,
-                                                     u3,
+                       err = ctnetlink_create_expect(net, &zone, cda, u3,
                                                      NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                }
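
Taken together, the ctnetlink changes extend the wire format so that a zone
can be attached to either tuple instead of, but not in addition to, the
whole connection: ctnetlink_parse_tuple_zone() rejects a per-tuple zone once
a zone ID has already been set. The attribute layout implied by the dump and
parse paths above:

        CTA_TUPLE_ORIG (nested)
          CTA_TUPLE_IP        (nested)
          CTA_TUPLE_PROTO     (nested)
          CTA_TUPLE_ZONE      (u16, optional; sets dir = NF_CT_ZONE_DIR_ORIG)
        CTA_TUPLE_REPLY (nested)
          CTA_TUPLE_IP        (nested)
          CTA_TUPLE_PROTO     (nested)
          CTA_TUPLE_ZONE      (u16, optional; sets dir = NF_CT_ZONE_DIR_REPL)
        CTA_ZONE              (u16, optional; whole-connection default zone)
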
index 825c3e3f83053582dba71c0128d706cb516c943a..5588c7ae1ac26740df91576867d5aff51aa961f9 100644 (file)
@@ -143,13 +143,14 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
                                  const struct nf_conntrack_tuple *t)
 {
        const struct nf_conntrack_tuple_hash *h;
+       const struct nf_conntrack_zone *zone;
        struct nf_conntrack_expect *exp;
        struct nf_conn *sibling;
-       u16 zone = nf_ct_zone(ct);
 
        pr_debug("trying to timeout ct or exp for tuple ");
        nf_ct_dump_tuple(t);
 
+       zone = nf_ct_zone(ct);
        h = nf_conntrack_find_get(net, zone, t);
        if (h)  {
                sibling = nf_ct_tuplehash_to_ctrack(h);
index ce3e840c870452b705744f9b64fb32661b6c82a5..dff0f0cc59e456171d81b8c5400bf30378f5ea66 100644 (file)
@@ -103,9 +103,9 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
                         ntohl(sack->end_seq), ntohl(new_end_seq));
 
                inet_proto_csum_replace4(&tcph->check, skb,
-                                        sack->start_seq, new_start_seq, 0);
+                                        sack->start_seq, new_start_seq, false);
                inet_proto_csum_replace4(&tcph->check, skb,
-                                        sack->end_seq, new_end_seq, 0);
+                                        sack->end_seq, new_end_seq, false);
                sack->start_seq = new_start_seq;
                sack->end_seq = new_end_seq;
                sackoff += sizeof(*sack);
@@ -193,8 +193,9 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
        newseq = htonl(ntohl(tcph->seq) + seqoff);
        newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
-       inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
-       inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+       inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+       inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
+                                false);
 
        pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
                 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
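
The 0 to false churn in this file and the NAT protocol helpers below tracks
an API change that turns the trailing pseudohdr argument of the inet
checksum helpers into a bool. The post-change prototypes are assumed here
to be:

        void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                      __be16 from, __be16 to, bool pseudohdr);
        void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
                                      __be32 from, __be32 to, bool pseudohdr);
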
index fc823fa5dcf53794bc8977cb5502d1dac92938e2..1fb3cacc04e16794ce27e9061893b9a90015fb82 100644 (file)
@@ -140,6 +140,35 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+                        int dir)
+{
+       const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+       if (zone->dir != dir)
+               return;
+       switch (zone->dir) {
+       case NF_CT_DEFAULT_ZONE_DIR:
+               seq_printf(s, "zone=%u ", zone->id);
+               break;
+       case NF_CT_ZONE_DIR_ORIG:
+               seq_printf(s, "zone-orig=%u ", zone->id);
+               break;
+       case NF_CT_ZONE_DIR_REPL:
+               seq_printf(s, "zone-reply=%u ", zone->id);
+               break;
+       default:
+               break;
+       }
+}
+#else
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+                               int dir)
+{
+}
+#endif
+
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
 static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
 {
@@ -202,6 +231,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
        print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                    l3proto, l4proto);
 
+       ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
        if (seq_has_overflowed(s))
                goto release;
 
@@ -214,6 +245,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
        print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                    l3proto, l4proto);
 
+       ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
        if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
                goto release;
 
@@ -228,11 +261,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 #endif
 
        ct_show_secctx(s, ct);
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-       seq_printf(s, "zone=%u ", nf_ct_zone(ct));
-#endif
-
+       ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
        ct_show_delta_time(s, ct);
 
        seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
index 4e0b47831d43a25f021a1eeb2c62c2307b8630e1..5113dfd39df929967f247ca644a96664e6d72f1a 100644 (file)
@@ -118,14 +118,13 @@ EXPORT_SYMBOL(nf_xfrm_me_harder);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
-           const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
 
        /* Original src, to ensure we map it consistently if possible. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-                     tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+                     tuple->dst.protonum ^ nf_conntrack_hash_rnd);
 
        return reciprocal_scale(hash, net->ct.nat_htable_size);
 }
@@ -185,20 +184,22 @@ same_src(const struct nf_conn *ct,
 
 /* Only called for SRC manip */
 static int
-find_appropriate_src(struct net *net, u16 zone,
+find_appropriate_src(struct net *net,
+                    const struct nf_conntrack_zone *zone,
                     const struct nf_nat_l3proto *l3proto,
                     const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
 {
-       unsigned int h = hash_by_src(net, zone, tuple);
+       unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
 
        hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
                ct = nat->ct;
-               if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+               if (same_src(ct, tuple) &&
+                   nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -218,7 +219,8 @@ find_appropriate_src(struct net *net, u16 zone,
  * the ip with the lowest src-ip/dst-ip/proto usage.
  */
 static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(const struct nf_conntrack_zone *zone,
+                   struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
@@ -258,7 +260,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
-                       0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+                       0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
 
        full_range = false;
        for (i = 0; i <= max; i++) {
@@ -297,10 +299,12 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
 {
+       const struct nf_conntrack_zone *zone;
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
-       u16 zone = nf_ct_zone(ct);
+
+       zone = nf_ct_zone(ct);
 
        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
@@ -420,7 +424,7 @@ nf_nat_setup_info(struct nf_conn *ct,
        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;
 
-               srchash = hash_by_src(net, nf_ct_zone(ct),
+               srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate extension area */
index b8067b53ff3a8579e9ba126288c65ee89702fbac..15c47b246d0d0a0632574e56d2caa9d12514966d 100644 (file)
@@ -69,7 +69,7 @@ dccp_manip_pkt(struct sk_buff *skb,
        l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
                             tuple, maniptype);
        inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
-                                0);
+                                false);
        return true;
 }
 
index 37f5505f4529be54f45ef773088ca7aadbbccaed..4f8820fc514804d775274330f590fe0d1dbab54f 100644 (file)
@@ -70,7 +70,7 @@ tcp_manip_pkt(struct sk_buff *skb,
                return true;
 
        l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-       inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+       inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
        return true;
 }
 
index b0ede2f0d8bcbd0c7ee156cec4c73a432b9438cd..b1e627227b6e2670fb6ce9d151e8965a4c8731c3 100644 (file)
@@ -57,7 +57,7 @@ udp_manip_pkt(struct sk_buff *skb,
                l3proto->csum_update(skb, iphdroff, &hdr->check,
                                     tuple, maniptype);
                inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
-                                        0);
+                                        false);
                if (!hdr->check)
                        hdr->check = CSUM_MANGLED_0;
        }
index 368f14e01e758d9771534eee7744d5d5a66cf8ba..58340c97bd836ffedd512a895cc02f56ee05f169 100644 (file)
@@ -56,7 +56,7 @@ udplite_manip_pkt(struct sk_buff *skb,
        }
 
        l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
        if (!hdr->check)
                hdr->check = CSUM_MANGLED_0;
 
index 71f1e9fdfa18fb9b1f2f2730ca21af42dad98eea..8fbbdb09826eefd29105b3e80f87c1e54dbb64a9 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_tcpudp.h>
 #include <linux/netfilter/xt_SYNPROXY.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 int synproxy_net_id;
 EXPORT_SYMBOL_GPL(synproxy_net_id);
@@ -225,7 +227,7 @@ unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
                                                     synproxy->tsoff);
                                }
                                inet_proto_csum_replace4(&th->check, skb,
-                                                        old, *ptr, 0);
+                                                        old, *ptr, false);
                                return 1;
                        }
                        optoff += op[1];
@@ -352,11 +354,9 @@ static int __net_init synproxy_net_init(struct net *net)
        struct nf_conn *ct;
        int err = -ENOMEM;
 
-       ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
-       if (IS_ERR(ct)) {
-               err = PTR_ERR(ct);
+       ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
+       if (!ct)
                goto err1;
-       }
 
        if (!nfct_seqadj_ext_add(ct))
                goto err2;
index c18af2f63eefb07e00be893190c35492232d4008..fefbf5f0b28d2f91e33db5e5d04182ff5a42db4b 100644 (file)
@@ -27,8 +27,6 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
 
-static LIST_HEAD(nfnl_acct_list);
-
 struct nf_acct {
        atomic64_t              pkts;
        atomic64_t              bytes;
@@ -53,6 +51,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
        struct nf_acct *nfacct, *matching = NULL;
+       struct net *net = sock_net(nfnl);
        char *acct_name;
        unsigned int size = 0;
        u32 flags = 0;
@@ -64,7 +63,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
        if (strlen(acct_name) == 0)
                return -EINVAL;
 
-       list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+       list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
                if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
                        continue;
 
@@ -124,7 +123,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
                             be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
        }
        atomic_set(&nfacct->refcnt, 1);
-       list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+       list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
        return 0;
 }
 
@@ -185,6 +184,7 @@ nla_put_failure:
 static int
 nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct net *net = sock_net(skb->sk);
        struct nf_acct *cur, *last;
        const struct nfacct_filter *filter = cb->data;
 
@@ -196,7 +196,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
                cb->args[1] = 0;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+       list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
                if (last) {
                        if (cur != last)
                                continue;
@@ -257,6 +257,7 @@ static int
 nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+       struct net *net = sock_net(nfnl);
        int ret = -ENOENT;
        struct nf_acct *cur;
        char *acct_name;
@@ -283,7 +284,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
                return -EINVAL;
        acct_name = nla_data(tb[NFACCT_NAME]);
 
-       list_for_each_entry(cur, &nfnl_acct_list, head) {
+       list_for_each_entry(cur, &net->nfnl_acct_list, head) {
                struct sk_buff *skb2;
 
                if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
@@ -336,19 +337,20 @@ static int
 nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+       struct net *net = sock_net(nfnl);
        char *acct_name;
        struct nf_acct *cur;
        int ret = -ENOENT;
 
        if (!tb[NFACCT_NAME]) {
-               list_for_each_entry(cur, &nfnl_acct_list, head)
+               list_for_each_entry(cur, &net->nfnl_acct_list, head)
                        nfnl_acct_try_del(cur);
 
                return 0;
        }
        acct_name = nla_data(tb[NFACCT_NAME]);
 
-       list_for_each_entry(cur, &nfnl_acct_list, head) {
+       list_for_each_entry(cur, &net->nfnl_acct_list, head) {
                if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
                        continue;
 
@@ -394,12 +396,12 @@ static const struct nfnetlink_subsystem nfnl_acct_subsys = {
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
 
-struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
 {
        struct nf_acct *cur, *acct = NULL;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+       list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
                if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
                        continue;
 
@@ -422,7 +424,9 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
 
 void nfnl_acct_put(struct nf_acct *acct)
 {
-       atomic_dec(&acct->refcnt);
+       if (atomic_dec_and_test(&acct->refcnt))
+               kfree_rcu(acct, rcu_head);
+
        module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_put);
@@ -478,34 +482,59 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
 
+static int __net_init nfnl_acct_net_init(struct net *net)
+{
+       INIT_LIST_HEAD(&net->nfnl_acct_list);
+
+       return 0;
+}
+
+static void __net_exit nfnl_acct_net_exit(struct net *net)
+{
+       struct nf_acct *cur, *tmp;
+
+       list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+               list_del_rcu(&cur->head);
+
+               if (atomic_dec_and_test(&cur->refcnt))
+                       kfree_rcu(cur, rcu_head);
+       }
+}
+
+static struct pernet_operations nfnl_acct_ops = {
+       .init   = nfnl_acct_net_init,
+       .exit   = nfnl_acct_net_exit,
+};
+
 static int __init nfnl_acct_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&nfnl_acct_ops);
+       if (ret < 0) {
+               pr_err("nfnl_acct_init: failed to register pernet ops\n");
+               goto err_out;
+       }
+
        pr_info("nfnl_acct: registering with nfnetlink.\n");
        ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
        if (ret < 0) {
                pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
-               goto err_out;
+               goto cleanup_pernet;
        }
        return 0;
+
+cleanup_pernet:
+       unregister_pernet_subsys(&nfnl_acct_ops);
 err_out:
        return ret;
 }
 
 static void __exit nfnl_acct_exit(void)
 {
-       struct nf_acct *cur, *tmp;
-
        pr_info("nfnl_acct: unregistering from nfnetlink.\n");
        nfnetlink_subsys_unregister(&nfnl_acct_subsys);
-
-       list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
-               list_del_rcu(&cur->head);
-               /* We are sure that our objects have no clients at this point,
-                * it's safe to release them all without checking refcnt. */
-               kfree_rcu(cur, rcu_head);
-       }
+       unregister_pernet_subsys(&nfnl_acct_ops);
 }
 
 module_init(nfnl_acct_init);
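
A minimal caller-side sketch of the per-netns API introduced above, assuming kernel module context (the object name "web-traffic" is illustrative, not from this commit):

/* Look up an accounting object in the caller's netns, charge one packet
 * against it, and drop the reference when done. */
static void example_account_skb(struct net *net, struct sk_buff *skb)
{
	struct nf_acct *acct;

	acct = nfnl_acct_find_get(net, "web-traffic");	/* takes a reference */
	if (!acct)
		return;			/* no such object in this namespace */

	nfnl_acct_update(skb, acct);	/* bump the pkts/bytes counters */
	nfnl_acct_put(acct);		/* the last put now frees via kfree_rcu() */
}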
index 17591239229f75564b944dc7db61e1dae6a2f1f1..1067fb4c1ffa2ec24988143fc7ec8d134dc9dda6 100644 (file)
 #include <net/netfilter/nf_tables.h>
 
 struct nft_counter {
-       seqlock_t       lock;
        u64             bytes;
        u64             packets;
 };
 
+struct nft_counter_percpu {
+       struct nft_counter      counter;
+       struct u64_stats_sync   syncp;
+};
+
+struct nft_counter_percpu_priv {
+       struct nft_counter_percpu __percpu *counter;
+};
+
 static void nft_counter_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
 {
-       struct nft_counter *priv = nft_expr_priv(expr);
-
-       write_seqlock_bh(&priv->lock);
-       priv->bytes += pkt->skb->len;
-       priv->packets++;
-       write_sequnlock_bh(&priv->lock);
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu *this_cpu;
+
+       local_bh_disable();
+       this_cpu = this_cpu_ptr(priv->counter);
+       u64_stats_update_begin(&this_cpu->syncp);
+       this_cpu->counter.bytes += pkt->skb->len;
+       this_cpu->counter.packets++;
+       u64_stats_update_end(&this_cpu->syncp);
+       local_bh_enable();
 }
 
 static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
-       struct nft_counter *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu *cpu_stats;
+       struct nft_counter total;
+       u64 bytes, packets;
        unsigned int seq;
-       u64 bytes;
-       u64 packets;
-
-       do {
-               seq = read_seqbegin(&priv->lock);
-               bytes   = priv->bytes;
-               packets = priv->packets;
-       } while (read_seqretry(&priv->lock, seq));
-
-       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
-               goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+       int cpu;
+
+       memset(&total, 0, sizeof(total));
+       for_each_possible_cpu(cpu) {
+               cpu_stats = per_cpu_ptr(priv->counter, cpu);
+               do {
+                       seq     = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       bytes   = cpu_stats->counter.bytes;
+                       packets = cpu_stats->counter.packets;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+
+               total.packets += packets;
+               total.bytes += bytes;
+       }
+
+       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
+           nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
                goto nla_put_failure;
        return 0;
 
@@ -67,23 +87,44 @@ static int nft_counter_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
 {
-       struct nft_counter *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+       struct nft_counter_percpu __percpu *cpu_stats;
+       struct nft_counter_percpu *this_cpu;
+
+       cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
+       if (cpu_stats == NULL)
+               return -ENOMEM;
+
+       preempt_disable();
+       this_cpu = this_cpu_ptr(cpu_stats);
+       if (tb[NFTA_COUNTER_PACKETS]) {
+               this_cpu->counter.packets =
+                       be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       }
+       if (tb[NFTA_COUNTER_BYTES]) {
+               this_cpu->counter.bytes =
+                       be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+       }
+       preempt_enable();
+       priv->counter = cpu_stats;
+       return 0;
+}
 
-       if (tb[NFTA_COUNTER_PACKETS])
-               priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
-       if (tb[NFTA_COUNTER_BYTES])
-               priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+static void nft_counter_destroy(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
 
-       seqlock_init(&priv->lock);
-       return 0;
+       free_percpu(priv->counter);
 }
 
 static struct nft_expr_type nft_counter_type;
 static const struct nft_expr_ops nft_counter_ops = {
        .type           = &nft_counter_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
        .eval           = nft_counter_eval,
        .init           = nft_counter_init,
+       .destroy        = nft_counter_destroy,
        .dump           = nft_counter_dump,
 };
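
The per-cpu conversion above follows the standard u64_stats pattern: lockless per-cpu writers bracketed by u64_stats_update_begin/end, and a fetch/retry loop that folds all CPUs into one consistent snapshot. A reduced sketch of that discipline (demo_stats and both function names are made up for illustration; allocation would come from netdev_alloc_pcpu_stats()):

struct demo_stats {
	u64			bytes;
	u64			packets;
	struct u64_stats_sync	syncp;
};

/* Writer side: the counter is per-cpu, so disabling bottom halves locally
 * is enough; no shared lock is taken on the packet path. */
static void demo_count(struct demo_stats __percpu *stats, unsigned int len)
{
	struct demo_stats *s;

	local_bh_disable();
	s = this_cpu_ptr(stats);
	u64_stats_update_begin(&s->syncp);
	s->bytes += len;
	s->packets++;
	u64_stats_update_end(&s->syncp);
	local_bh_enable();
}

/* Reader side: the retry loop yields a torn-free 64-bit snapshot even on
 * 32-bit SMP; the per-cpu values are then summed. */
static void demo_fold(struct demo_stats __percpu *stats, u64 *bytes, u64 *pkts)
{
	int cpu;

	*bytes = *pkts = 0;
	for_each_possible_cpu(cpu) {
		const struct demo_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int seq;
		u64 b, p;

		do {
			seq = u64_stats_fetch_begin_irq(&s->syncp);
			b = s->bytes;
			p = s->packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, seq));

		*bytes += b;
		*pkts  += p;
	}
}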
 
index 435c1ccd6c0e6a74266b2054a62f603d038ecb00..5d67938f8b2f27a090f47386fe4ec889c0507b13 100644 (file)
 static DEFINE_SPINLOCK(limit_lock);
 
 struct nft_limit {
+       u64             last;
        u64             tokens;
+       u64             tokens_max;
        u64             rate;
-       u64             unit;
-       unsigned long   stamp;
+       u64             nsecs;
+       u32             burst;
 };
 
-static void nft_limit_eval(const struct nft_expr *expr,
-                          struct nft_regs *regs,
-                          const struct nft_pktinfo *pkt)
+static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
 {
-       struct nft_limit *priv = nft_expr_priv(expr);
+       u64 now, tokens;
+       s64 delta;
 
        spin_lock_bh(&limit_lock);
-       if (time_after_eq(jiffies, priv->stamp)) {
-               priv->tokens = priv->rate;
-               priv->stamp = jiffies + priv->unit * HZ;
-       }
-
-       if (priv->tokens >= 1) {
-               priv->tokens--;
+       now = ktime_get_ns();
+       tokens = limit->tokens + now - limit->last;
+       if (tokens > limit->tokens_max)
+               tokens = limit->tokens_max;
+
+       limit->last = now;
+       delta = tokens - cost;
+       if (delta >= 0) {
+               limit->tokens = delta;
                spin_unlock_bh(&limit_lock);
-               return;
+               return false;
        }
+       limit->tokens = tokens;
        spin_unlock_bh(&limit_lock);
-
-       regs->verdict.code = NFT_BREAK;
+       return true;
 }
 
-static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
-       [NFTA_LIMIT_RATE]       = { .type = NLA_U64 },
-       [NFTA_LIMIT_UNIT]       = { .type = NLA_U64 },
-};
-
-static int nft_limit_init(const struct nft_ctx *ctx,
-                         const struct nft_expr *expr,
+static int nft_limit_init(struct nft_limit *limit,
                          const struct nlattr * const tb[])
 {
-       struct nft_limit *priv = nft_expr_priv(expr);
+       u64 unit;
 
        if (tb[NFTA_LIMIT_RATE] == NULL ||
            tb[NFTA_LIMIT_UNIT] == NULL)
                return -EINVAL;
 
-       priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
-       priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-       priv->stamp  = jiffies + priv->unit * HZ;
-       priv->tokens = priv->rate;
+       limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+       limit->nsecs = unit * NSEC_PER_SEC;
+       if (limit->rate == 0 || limit->nsecs < unit)
+               return -EOVERFLOW;
+       limit->tokens = limit->tokens_max = limit->nsecs;
+
+       if (tb[NFTA_LIMIT_BURST]) {
+               u64 rate;
+
+               limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+               rate = limit->rate + limit->burst;
+               if (rate < limit->rate)
+                       return -EOVERFLOW;
+
+               limit->rate = rate;
+       }
+       limit->last = ktime_get_ns();
+
        return 0;
 }
 
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+                         enum nft_limit_type type)
 {
-       const struct nft_limit *priv = nft_expr_priv(expr);
+       u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+       u64 rate = limit->rate - limit->burst;
 
-       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
-               goto nla_put_failure;
-       if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
+           nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+           nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+           nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)))
                goto nla_put_failure;
        return 0;
 
@@ -84,18 +100,114 @@ nla_put_failure:
        return -1;
 }
 
+struct nft_limit_pkts {
+       struct nft_limit        limit;
+       u64                     cost;
+};
+
+static void nft_limit_pkts_eval(const struct nft_expr *expr,
+                               struct nft_regs *regs,
+                               const struct nft_pktinfo *pkt)
+{
+       struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+       if (nft_limit_eval(&priv->limit, priv->cost))
+               regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+       [NFTA_LIMIT_RATE]       = { .type = NLA_U64 },
+       [NFTA_LIMIT_UNIT]       = { .type = NLA_U64 },
+       [NFTA_LIMIT_BURST]      = { .type = NLA_U32 },
+       [NFTA_LIMIT_TYPE]       = { .type = NLA_U32 },
+};
+
+static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr,
+                              const struct nlattr * const tb[])
+{
+       struct nft_limit_pkts *priv = nft_expr_priv(expr);
+       int err;
+
+       err = nft_limit_init(&priv->limit, tb);
+       if (err < 0)
+               return err;
+
+       priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+       return 0;
+}
+
+static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+       return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+}
+
 static struct nft_expr_type nft_limit_type;
-static const struct nft_expr_ops nft_limit_ops = {
+static const struct nft_expr_ops nft_limit_pkts_ops = {
+       .type           = &nft_limit_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+       .eval           = nft_limit_pkts_eval,
+       .init           = nft_limit_pkts_init,
+       .dump           = nft_limit_pkts_dump,
+};
+
+static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
+                                    struct nft_regs *regs,
+                                    const struct nft_pktinfo *pkt)
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+       u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+       if (nft_limit_eval(priv, cost))
+               regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_limit_pkt_bytes_init(const struct nft_ctx *ctx,
+                                   const struct nft_expr *expr,
+                                   const struct nlattr * const tb[])
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+
+       return nft_limit_init(priv, tb);
+}
+
+static int nft_limit_pkt_bytes_dump(struct sk_buff *skb,
+                                   const struct nft_expr *expr)
+{
+       const struct nft_limit *priv = nft_expr_priv(expr);
+
+       return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+}
+
+static const struct nft_expr_ops nft_limit_pkt_bytes_ops = {
        .type           = &nft_limit_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
-       .eval           = nft_limit_eval,
-       .init           = nft_limit_init,
-       .dump           = nft_limit_dump,
+       .eval           = nft_limit_pkt_bytes_eval,
+       .init           = nft_limit_pkt_bytes_init,
+       .dump           = nft_limit_pkt_bytes_dump,
 };
 
+static const struct nft_expr_ops *
+nft_limit_select_ops(const struct nft_ctx *ctx,
+                    const struct nlattr * const tb[])
+{
+       if (tb[NFTA_LIMIT_TYPE] == NULL)
+               return &nft_limit_pkts_ops;
+
+       switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
+       case NFT_LIMIT_PKTS:
+               return &nft_limit_pkts_ops;
+       case NFT_LIMIT_PKT_BYTES:
+               return &nft_limit_pkt_bytes_ops;
+       }
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct nft_expr_type nft_limit_type __read_mostly = {
        .name           = "limit",
-       .ops            = &nft_limit_ops,
+       .select_ops     = nft_limit_select_ops,
        .policy         = nft_limit_policy,
        .maxattr        = NFTA_LIMIT_MAX,
        .flags          = NFT_EXPR_STATEFUL,
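
The rewritten limit expression is a token bucket measured in nanoseconds: the bucket holds up to nsecs worth of credit, each packet costs nsecs / rate, and elapsed time refills credit one-for-one. A worked example under assumed numbers (userspace-style C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate  = 10;			/* 10 packets ...	  */
	uint64_t unit  = 1;			/* ... per second	  */
	uint64_t nsecs = unit * 1000000000ULL;	/* bucket size: 1e9 ns	  */
	uint64_t cost  = nsecs / rate;		/* 1e8 ns charged/packet  */

	/* A full bucket admits `rate` back-to-back packets; after that,
	 * one packet is admitted roughly every `cost` nanoseconds. A
	 * configured burst is folded into `rate` before computing cost. */
	printf("cost per packet: %llu ns\n", (unsigned long long)cost);
	return 0;
}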
index 94fb3b27a2c54393091602e0e96b2634ff8ceb1b..09b4b07eb67644fdc90ef357378c46d243b7a642 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
 
+/* add vlan header into the user buffer if the tag was removed by offloads */
+static bool
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+{
+       int mac_off = skb_mac_header(skb) - skb->data;
+       u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+       struct vlan_ethhdr veth;
+
+       vlanh = (u8 *) &veth;
+       if (offset < ETH_HLEN) {
+               u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+
+               if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+                       return false;
+
+               veth.h_vlan_proto = skb->vlan_proto;
+
+               memcpy(dst_u8, vlanh + offset, ethlen);
+
+               len -= ethlen;
+               if (len == 0)
+                       return true;
+
+               dst_u8 += ethlen;
+               offset = ETH_HLEN;
+       } else if (offset >= VLAN_ETH_HLEN) {
+               offset -= VLAN_HLEN;
+               goto skip;
+       }
+
+       veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+       veth.h_vlan_encapsulated_proto = skb->protocol;
+
+       vlanh += offset;
+
+       vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
+       memcpy(dst_u8, vlanh, vlan_len);
+
+       len -= vlan_len;
+       if (!len)
+               return true;
+
+       dst_u8 += vlan_len;
+ skip:
+       return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+}
+
 static void nft_payload_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
@@ -26,10 +74,18 @@ static void nft_payload_eval(const struct nft_expr *expr,
        u32 *dest = &regs->data[priv->dreg];
        int offset;
 
+       dest[priv->len / NFT_REG32_SIZE] = 0;
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
                        goto err;
+
+               if (skb_vlan_tag_present(skb)) {
+                       if (!nft_payload_copy_vlan(dest, skb,
+                                                  priv->offset, priv->len))
+                               goto err;
+                       return;
+               }
                offset = skb_mac_header(skb) - skb->data;
                break;
        case NFT_PAYLOAD_NETWORK_HEADER:
@@ -43,7 +99,6 @@ static void nft_payload_eval(const struct nft_expr *expr,
        }
        offset += priv->offset;
 
-       dest[priv->len / NFT_REG32_SIZE] = 0;
        if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
                goto err;
        return;
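
When the NIC has stripped the VLAN tag, nft_payload_copy_vlan() serves link-layer offsets against a synthetic 18-byte VLAN Ethernet header. A sketch of that byte map (editorial illustration with kernel-style types; it mirrors struct vlan_ethhdr):

/* Reconstructed link-layer view for a hardware-stripped tag: bytes 0-11
 * are copied from the skb's mac header, bytes 12-15 are synthesized from
 * skb->vlan_proto / skb_vlan_tag_get(), bytes 16-17 come from
 * skb->protocol, and offsets >= 18 read the skb at (offset - VLAN_HLEN),
 * i.e. four bytes earlier than requested. */
struct vlan_view {
	u8	h_dest[ETH_ALEN];		/* offsets  0..5  */
	u8	h_source[ETH_ALEN];		/* offsets  6..11 */
	__be16	h_vlan_proto;			/* offsets 12..13 */
	__be16	h_vlan_TCI;			/* offsets 14..15 */
	__be16	h_vlan_encapsulated_proto;	/* offsets 16..17 */
} __packed;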
index c6630030c9121c7af27a3052ad776cd6646eb601..8e524898ccea234a2b5cae3bdfaf2cd72d023238 100644 (file)
@@ -181,9 +181,23 @@ out:
 #endif
 }
 
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+       switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+                              XT_CT_ZONE_DIR_REPL)) {
+       case XT_CT_ZONE_DIR_ORIG:
+               return NF_CT_ZONE_DIR_ORIG;
+       case XT_CT_ZONE_DIR_REPL:
+               return NF_CT_ZONE_DIR_REPL;
+       default:
+               return NF_CT_DEFAULT_ZONE_DIR;
+       }
+}
+
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                          struct xt_ct_target_info_v1 *info)
 {
+       struct nf_conntrack_zone zone;
        struct nf_conn *ct;
        int ret = -EOPNOTSUPP;
 
@@ -193,7 +207,9 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
        }
 
 #ifndef CONFIG_NF_CONNTRACK_ZONES
-       if (info->zone)
+       if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+                                        XT_CT_ZONE_DIR_REPL |
+                                        XT_CT_ZONE_MARK))
                goto err1;
 #endif
 
@@ -201,10 +217,17 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
        if (ret < 0)
                goto err1;
 
-       ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
-       ret = PTR_ERR(ct);
-       if (IS_ERR(ct))
+       memset(&zone, 0, sizeof(zone));
+       zone.id = info->zone;
+       zone.dir = xt_ct_flags_to_dir(info);
+       if (info->flags & XT_CT_ZONE_MARK)
+               zone.flags |= NF_CT_FLAG_MARK;
+
+       ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
+       if (!ct) {
+               ret = -ENOMEM;
                goto err2;
+       }
 
        ret = 0;
        if ((info->ct_events || info->exp_events) &&
index 8c3190e2fc6abad6394ba5498762ba0502c34d58..8c02501a530f4b481f9a6f4248957893801af2b2 100644 (file)
@@ -144,7 +144,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
                        inet_proto_csum_replace2(&tcph->check, skb,
                                                 htons(oldmss), htons(newmss),
-                                                0);
+                                                false);
                        return 0;
                }
        }
@@ -185,18 +185,18 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
        inet_proto_csum_replace2(&tcph->check, skb,
-                                htons(len), htons(len + TCPOLEN_MSS), 1);
+                                htons(len), htons(len + TCPOLEN_MSS), true);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
        opt[3] = newmss & 0x00ff;
 
-       inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
+       inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
 
        oldval = ((__be16 *)tcph)[6];
        tcph->doff += TCPOLEN_MSS/4;
        inet_proto_csum_replace2(&tcph->check, skb,
-                                oldval, ((__be16 *)tcph)[6], 0);
+                                oldval, ((__be16 *)tcph)[6], false);
        return TCPOLEN_MSS;
 }
 
index 625fa1d636a01ccacb43c2fcae902d65a466b9db..eb92bffff11ccb22690ec0cf76ff25076d6a8cb6 100644 (file)
@@ -80,7 +80,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
                                n <<= 8;
                        }
                        inet_proto_csum_replace2(&tcph->check, skb, htons(o),
-                                                htons(n), 0);
+                                                htons(n), false);
                }
                memset(opt + i, TCPOPT_NOP, optl);
        }
index c5d6556dbc5e407cffca198ac5fe66b97a0cb908..49fee6aa2c0aa03b8af6bf9c3358d300d8b9ac7c 100644 (file)
  *     modify it under the terms of the GNU General Public License
  *     version 2 or later, as published by the Free Software Foundation.
  */
-#include <linux/ip.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/route.h>
 #include <linux/skbuff.h>
-#include <linux/notifier.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/route.h>
+#include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#      define WITH_CONNTRACK 1
-#      include <net/netfilter/nf_conntrack.h>
-#endif
-
 struct xt_tee_priv {
        struct notifier_block   notifier;
        struct xt_tee_tginfo    *tginfo;
@@ -38,161 +27,24 @@ struct xt_tee_priv {
 
 static const union nf_inet_addr tee_zero_address;
 
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-       const struct dst_entry *dst;
-
-       if (skb->dev != NULL)
-               return dev_net(skb->dev);
-       dst = skb_dst(skb);
-       if (dst != NULL && dst->dev != NULL)
-               return dev_net(dst->dev);
-#endif
-       return &init_net;
-}
-
-static bool
-tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-       struct net *net = pick_net(skb);
-       struct rtable *rt;
-       struct flowi4 fl4;
-
-       memset(&fl4, 0, sizeof(fl4));
-       if (info->priv) {
-               if (info->priv->oif == -1)
-                       return false;
-               fl4.flowi4_oif = info->priv->oif;
-       }
-       fl4.daddr = info->gw.ip;
-       fl4.flowi4_tos = RT_TOS(iph->tos);
-       fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
-       fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
-       rt = ip_route_output_key(net, &fl4);
-       if (IS_ERR(rt))
-               return false;
-
-       skb_dst_drop(skb);
-       skb_dst_set(skb, &rt->dst);
-       skb->dev      = rt->dst.dev;
-       skb->protocol = htons(ETH_P_IP);
-       return true;
-}
-
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_tee_tginfo *info = par->targinfo;
-       struct iphdr *iph;
 
-       if (__this_cpu_read(nf_skb_duplicated))
-               return XT_CONTINUE;
-       /*
-        * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
-        * the original skb, which should continue on its way as if nothing has
-        * happened. The copy should be independently delivered to the TEE
-        * --gateway.
-        */
-       skb = pskb_copy(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               return XT_CONTINUE;
-
-#ifdef WITH_CONNTRACK
-       /* Avoid counting cloned packets towards the original connection. */
-       nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
-#endif
-       /*
-        * If we are in PREROUTING/INPUT, the checksum must be recalculated
-        * since the length could have changed as a result of defragmentation.
-        *
-        * We also decrease the TTL to mitigate potential TEE loops
-        * between two hosts.
-        *
-        * Set %IP_DF so that the original source is notified of a potentially
-        * decreased MTU on the clone route. IPv6 does this too.
-        */
-       iph = ip_hdr(skb);
-       iph->frag_off |= htons(IP_DF);
-       if (par->hooknum == NF_INET_PRE_ROUTING ||
-           par->hooknum == NF_INET_LOCAL_IN)
-               --iph->ttl;
-       ip_send_check(iph);
+       nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
 
-       if (tee_tg_route4(skb, info)) {
-               __this_cpu_write(nf_skb_duplicated, true);
-               ip_local_out(skb);
-               __this_cpu_write(nf_skb_duplicated, false);
-       } else {
-               kfree_skb(skb);
-       }
        return XT_CONTINUE;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static bool
-tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-       const struct ipv6hdr *iph = ipv6_hdr(skb);
-       struct net *net = pick_net(skb);
-       struct dst_entry *dst;
-       struct flowi6 fl6;
-
-       memset(&fl6, 0, sizeof(fl6));
-       if (info->priv) {
-               if (info->priv->oif == -1)
-                       return false;
-               fl6.flowi6_oif = info->priv->oif;
-       }
-       fl6.daddr = info->gw.in6;
-       fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
-                          (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
-       fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
-       dst = ip6_route_output(net, NULL, &fl6);
-       if (dst->error) {
-               dst_release(dst);
-               return false;
-       }
-       skb_dst_drop(skb);
-       skb_dst_set(skb, dst);
-       skb->dev      = dst->dev;
-       skb->protocol = htons(ETH_P_IPV6);
-       return true;
-}
-
 static unsigned int
 tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_tee_tginfo *info = par->targinfo;
 
-       if (__this_cpu_read(nf_skb_duplicated))
-               return XT_CONTINUE;
-       skb = pskb_copy(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               return XT_CONTINUE;
+       nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
 
-#ifdef WITH_CONNTRACK
-       nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
-#endif
-       if (par->hooknum == NF_INET_PRE_ROUTING ||
-           par->hooknum == NF_INET_LOCAL_IN) {
-               struct ipv6hdr *iph = ipv6_hdr(skb);
-               --iph->hop_limit;
-       }
-       if (tee_tg_route6(skb, info)) {
-               __this_cpu_write(nf_skb_duplicated, true);
-               ip6_local_out(skb);
-               __this_cpu_write(nf_skb_duplicated, false);
-       } else {
-               kfree_skb(skb);
-       }
        return XT_CONTINUE;
 }
 #endif
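
Both TEE paths now delegate cloning, conntrack untracking, TTL decrement and routing to nf_dup_ipv4()/nf_dup_ipv6(), including the loop-avoidance guard the deleted lines implemented inline. A simplified sketch of that guard pattern as it lives in the dup helpers (error paths and header fixups omitted):

/* A per-cpu flag marks "currently transmitting a duplicate", so a TEE
 * rule that matches its own clone does not duplicate it a second time. */
static void dup_and_send(struct sk_buff *skb)
{
	if (__this_cpu_read(nf_skb_duplicated))
		return;			/* already inside a clone: stop */

	skb = pskb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	__this_cpu_write(nf_skb_duplicated, true);
	ip_local_out(skb);		/* clone traverses netfilter again */
	__this_cpu_write(nf_skb_duplicated, false);
}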
index 29ba6218a820e7cc8e9363db91312cc27c09004e..075d89d94d28f4deb87f473dd787f61395fa2681 100644 (file)
@@ -134,7 +134,7 @@ static bool add_hlist(struct hlist_head *head,
 static unsigned int check_hlist(struct net *net,
                                struct hlist_head *head,
                                const struct nf_conntrack_tuple *tuple,
-                               u16 zone,
+                               const struct nf_conntrack_zone *zone,
                                bool *addit)
 {
        const struct nf_conntrack_tuple_hash *found;
@@ -201,7 +201,7 @@ static unsigned int
 count_tree(struct net *net, struct rb_root *root,
           const struct nf_conntrack_tuple *tuple,
           const union nf_inet_addr *addr, const union nf_inet_addr *mask,
-          u8 family, u16 zone)
+          u8 family, const struct nf_conntrack_zone *zone)
 {
        struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
        struct rb_node **rbnode, *parent;
@@ -290,7 +290,8 @@ static int count_them(struct net *net,
                      const struct nf_conntrack_tuple *tuple,
                      const union nf_inet_addr *addr,
                      const union nf_inet_addr *mask,
-                     u_int8_t family, u16 zone)
+                     u_int8_t family,
+                     const struct nf_conntrack_zone *zone)
 {
        struct rb_root *root;
        int count;
@@ -321,10 +322,10 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
        union nf_inet_addr addr;
        struct nf_conntrack_tuple tuple;
        const struct nf_conntrack_tuple *tuple_ptr = &tuple;
+       const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        enum ip_conntrack_info ctinfo;
        const struct nf_conn *ct;
        unsigned int connections;
-       u16 zone = NF_CT_DEFAULT_ZONE;
 
        ct = nf_ct_get(skb, &ctinfo);
        if (ct != NULL) {
index 8c646ed9c921bca1fbf507c1aa97c1dca60d8df1..3048a7e3a90a5a27887b7e4ff731d00098f2c928 100644 (file)
@@ -37,7 +37,7 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
        struct xt_nfacct_match_info *info = par->matchinfo;
        struct nf_acct *nfacct;
 
-       nfacct = nfnl_acct_find_get(info->name);
+       nfacct = nfnl_acct_find_get(par->net, info->name);
        if (nfacct == NULL) {
                pr_info("xt_nfacct: accounting object with name `%s' "
                        "does not exists\n", info->name);
index d8e2e3918ce2fd95637c4cba8bfc4886feb91ea6..67d2104778636c62e7d3fa94c73ce5fb2cd4cb7d 100644 (file)
@@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
 
        err = __netlink_insert(table, sk);
        if (err) {
+               /* In case the hashtable backend returns with -EBUSY
+                * from here, it must not escape to the caller.
+                */
+               if (unlikely(err == -EBUSY))
+                       err = -EOVERFLOW;
                if (err == -EEXIST)
                        err = -EADDRINUSE;
                nlk_sk(sk)->portid = 0;
index 95af2d24d5be7b351c097caa9cd1aead2bee2139..943889b87a34bdd33e9c4a8c1b01df04808b3e00 100644 (file)
@@ -351,6 +351,20 @@ int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload)
 }
 EXPORT_SYMBOL(nci_prop_cmd);
 
+int nci_core_reset(struct nci_dev *ndev)
+{
+       return __nci_request(ndev, nci_reset_req, 0,
+                            msecs_to_jiffies(NCI_RESET_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_reset);
+
+int nci_core_init(struct nci_dev *ndev)
+{
+       return __nci_request(ndev, nci_init_req, 0,
+                            msecs_to_jiffies(NCI_INIT_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_init);
+
 static int nci_open_device(struct nci_dev *ndev)
 {
        int rc = 0;
@@ -388,6 +402,10 @@ static int nci_open_device(struct nci_dev *ndev)
                                   msecs_to_jiffies(NCI_INIT_TIMEOUT));
        }
 
+       if (ndev->ops->post_setup) {
+               rc = ndev->ops->post_setup(ndev);
+       }
+
        if (!rc) {
                rc = __nci_request(ndev, nci_init_complete_req, 0,
                                   msecs_to_jiffies(NCI_INIT_TIMEOUT));
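
The exported nci_core_reset()/nci_core_init() pair and the new post_setup callback let a driver re-run the NCI handshake after applying vendor configuration. A hypothetical driver-side sketch (assumes the matching .post_setup member added to struct nci_ops elsewhere in this series; the demo_* names are placeholders):

static int demo_nci_post_setup(struct nci_dev *ndev)
{
	int rc;

	rc = demo_load_proprietary_config(ndev);	/* placeholder */
	if (rc)
		return rc;

	/* Low-level parameters changed: reset and re-init the controller. */
	rc = nci_core_reset(ndev);
	if (rc)
		return rc;

	return nci_core_init(ndev);
}

static struct nci_ops demo_nci_ops = {
	/* .open/.close/.send elided */
	.post_setup	= demo_nci_post_setup,
};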
index af002df640c7fa15eb412f718bb5508589429f5c..609f92283d1b74d118eb00d0fcedd6ece105e844 100644 (file)
@@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
        r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
                        msecs_to_jiffies(NCI_DATA_TIMEOUT));
 
-       if (r == NCI_STATUS_OK)
+       if (r == NCI_STATUS_OK && skb)
                *skb = conn_info->rx_skb;
 
        return r;
index f85f37ed19b23d0b9509e243d2c56b3070c6cbb6..853172c27f68043f47ddd45a1e778e9e87efd163 100644 (file)
@@ -63,6 +63,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
                                     .len = NFC_FIRMWARE_NAME_MAXSIZE },
        [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+       [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
+
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1503,7 +1505,7 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
        u32 dev_idx, vid, subcmd;
        u8 *data;
        size_t data_len;
-       int i;
+       int i, err;
 
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
            !info->attrs[NFC_ATTR_VENDOR_ID] ||
@@ -1518,12 +1520,13 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
        if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
                return -ENODEV;
 
-       data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
-       if (data) {
+       if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
+               data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
                data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
                if (data_len == 0)
                        return -EINVAL;
        } else {
+               data = NULL;
                data_len = 0;
        }
 
@@ -1533,12 +1536,92 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
                if (cmd->vendor_id != vid || cmd->subcmd != subcmd)
                        continue;
 
-               return cmd->doit(dev, data, data_len);
+               dev->cur_cmd_info = info;
+               err = cmd->doit(dev, data, data_len);
+               dev->cur_cmd_info = NULL;
+               return err;
        }
 
        return -EOPNOTSUPP;
 }
 
+/* message building helper */
+static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+                               int flags, u8 cmd)
+{
+       /* since there is no private header just add the generic one */
+       return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd);
+}
+
+static struct sk_buff *
+__nfc_alloc_vendor_cmd_skb(struct nfc_dev *dev, int approxlen,
+                          u32 portid, u32 seq,
+                          enum nfc_attrs attr,
+                          u32 oui, u32 subcmd, gfp_t gfp)
+{
+       struct sk_buff *skb;
+       void *hdr;
+
+       skb = nlmsg_new(approxlen + 100, gfp);
+       if (!skb)
+               return NULL;
+
+       hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR);
+       if (!hdr) {
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx))
+               goto nla_put_failure;
+       if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui))
+               goto nla_put_failure;
+       if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd))
+               goto nla_put_failure;
+
+       ((void **)skb->cb)[0] = dev;
+       ((void **)skb->cb)[1] = hdr;
+
+       return skb;
+
+nla_put_failure:
+       kfree_skb(skb);
+       return NULL;
+}
+
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+                                                enum nfc_attrs attr,
+                                                u32 oui, u32 subcmd,
+                                                int approxlen)
+{
+       if (WARN_ON(!dev->cur_cmd_info))
+               return NULL;
+
+       return __nfc_alloc_vendor_cmd_skb(dev, approxlen,
+                                         dev->cur_cmd_info->snd_portid,
+                                         dev->cur_cmd_info->snd_seq, attr,
+                                         oui, subcmd, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__nfc_alloc_vendor_cmd_reply_skb);
+
+int nfc_vendor_cmd_reply(struct sk_buff *skb)
+{
+       struct nfc_dev *dev = ((void **)skb->cb)[0];
+       void *hdr = ((void **)skb->cb)[1];
+
+       /* clear CB data for netlink core to own from now on */
+       memset(skb->cb, 0, sizeof(skb->cb));
+
+       if (WARN_ON(!dev->cur_cmd_info)) {
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       genlmsg_end(skb, hdr);
+       return genlmsg_reply(skb, dev->cur_cmd_info);
+}
+EXPORT_SYMBOL(nfc_vendor_cmd_reply);
+
 static const struct genl_ops nfc_genl_ops[] = {
        {
                .cmd = NFC_CMD_GET_DEVICE,
index 15840401a2ce584356a3fff7390bb11cbb8f7752..422dc0567de9d19ec4d46c0c38a9efd1be6caca2 100644 (file)
@@ -34,7 +34,7 @@ config OPENVSWITCH
 config OPENVSWITCH_GRE
        tristate "Open vSwitch GRE tunneling support"
        depends on OPENVSWITCH
-       depends on NET_IPGRE_DEMUX
+       depends on NET_IPGRE
        default OPENVSWITCH
        ---help---
           If you say Y here, then Open vSwitch will be able to create GRE
index cf04c2f8b32a57bfec8e024d2db95dd8c0468b3f..4f4200717bef984d02b9232ea764180f9e8203d7 100644 (file)
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
        return 0;
 }
 
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
-                       __be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+                                 __be32 addr, __be32 new_addr)
 {
        int transport_len = skb->len - skb_transport_offset(skb);
 
+       if (nh->frag_off & htons(IP_OFFSET))
+               return;
+
        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-                                                *addr, new_addr, 1);
+                                                addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
-                                                        *addr, new_addr, 1);
+                                                        addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
+}
 
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+                       __be32 *addr, __be32 new_addr)
+{
+       update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
@@ -308,14 +316,14 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
-                                                 addr, new_addr, 1);
+                                                 addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
-                                                         addr, new_addr, 1);
+                                                         addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
@@ -323,7 +331,7 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
-                                                 skb, addr, new_addr, 1);
+                                                 skb, addr, new_addr, true);
        }
 }
 
@@ -490,7 +498,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
 {
-       inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+       inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
 }
 
@@ -669,9 +677,12 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 
        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
+               u32 probability;
+
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
-                       if (prandom_u32() >= nla_get_u32(a))
+                       probability = nla_get_u32(a);
+                       if (!probability || prandom_u32() > probability)
                                return 0;
                        break;
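
The corrected sampling test treats the probability attribute as a u32 scaled against UINT32_MAX, so 0 never samples and UINT32_MAX always does; the old ">=" comparison could skip a packet even at full probability. A standalone illustration (rnd stands in for prandom_u32()):

#include <stdbool.h>
#include <stdint.h>

static bool should_sample(uint32_t probability, uint32_t rnd)
{
	if (!probability)
		return false;		/* 0 => never sample */
	return rnd <= probability;	/* UINT32_MAX => always sample */
}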
 
index a6eb77ab1a6456768338a55290955bb29c69749a..4e7a3f7facc2202742671a7ceddca5e253552e42 100644 (file)
@@ -534,19 +534,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
                        tun_flags |= TUNNEL_KEY;
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
                                        nla_get_in_addr(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+                       SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
                                        nla_get_in_addr(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_TOS:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+                       SW_FLOW_KEY_PUT(match, tun_key.tos,
                                        nla_get_u8(a), is_mask);
                        break;
                case OVS_TUNNEL_KEY_ATTR_TTL:
-                       SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+                       SW_FLOW_KEY_PUT(match, tun_key.ttl,
                                        nla_get_u8(a), is_mask);
                        ttl = true;
                        break;
@@ -609,7 +609,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
        }
 
        if (!is_mask) {
-               if (!match->key->tun_key.ipv4_dst) {
+               if (!match->key->tun_key.u.ipv4.dst) {
                        OVS_NLERR(log, "IPv4 tunnel dst address is zero");
                        return -EINVAL;
                }
@@ -647,18 +647,18 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
        if (output->tun_flags & TUNNEL_KEY &&
            nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
                return -EMSGSIZE;
-       if (output->ipv4_src &&
+       if (output->u.ipv4.src &&
            nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-                           output->ipv4_src))
+                           output->u.ipv4.src))
                return -EMSGSIZE;
-       if (output->ipv4_dst &&
+       if (output->u.ipv4.dst &&
            nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-                           output->ipv4_dst))
+                           output->u.ipv4.dst))
                return -EMSGSIZE;
-       if (output->ipv4_tos &&
-           nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+       if (output->tos &&
+           nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
                return -EMSGSIZE;
-       if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+       if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
                return -EMSGSIZE;
        if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
            nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
@@ -1116,7 +1116,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                        /* The userspace does not send tunnel attributes that
                         * are 0, but we should not wildcard them nonetheless.
                         */
-                       if (match->key->tun_key.ipv4_dst)
+                       if (match->key->tun_key.u.ipv4.dst)
                                SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
                                                         0xff, true);
 
@@ -1287,7 +1287,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
        if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
                goto nla_put_failure;
 
-       if ((swkey->tun_key.ipv4_dst || is_mask)) {
+       if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
                const void *opts = NULL;
 
                if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
index 3a9d1dde76ed3457bbe527053d95bd9ec774f100..d22d8e948d0f4b126894a71f8dcc30bc7d6d024d 100644 (file)
@@ -426,7 +426,7 @@ static u32 flow_hash(const struct sw_flow_key *key,
 
 static int flow_key_start(const struct sw_flow_key *key)
 {
-       if (key->tun_key.ipv4_dst)
+       if (key->tun_key.u.ipv4.dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
index 1da3a14d10101f78881c392a06b49aa9e53c9a13..d01bd6360970871db16889eea191b80735ff22a2 100644 (file)
@@ -203,8 +203,8 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
        }
 
        err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
-                             tun_key->ipv4_dst, tun_key->ipv4_tos,
-                             tun_key->ipv4_ttl, df, sport, dport,
+                             tun_key->u.ipv4.dst, tun_key->tos,
+                             tun_key->ttl, df, sport, dport,
                              tun_key->tun_flags, vni, opts_len, opts,
                              !!(tun_key->tun_flags & TUNNEL_CSUM), false);
        if (err < 0)
index b87656c66aaffe3b6ba905cbb84cdc0ec0b0e31b..871801d2ac23e82a26f09f22e13e71797dc9a603 100644 (file)
 
 #include "datapath.h"
 #include "vport.h"
+#include "vport-netdev.h"
 
 static struct vport_ops ovs_gre_vport_ops;
 
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 be64_get_low32(__be64 x)
+static struct vport *gre_tnl_create(const struct vport_parms *parms)
 {
-#ifdef __BIG_ENDIAN
-       return (__force __be32)x;
-#else
-       return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
-static __be16 filter_tnl_flags(__be16 flags)
-{
-       return flags & (TUNNEL_CSUM | TUNNEL_KEY);
-}
-
-static struct sk_buff *__build_header(struct sk_buff *skb,
-                                     int tunnel_hlen)
-{
-       struct tnl_ptk_info tpi;
-       const struct ip_tunnel_key *tun_key;
-
-       tun_key = &OVS_CB(skb)->egress_tun_info->key;
-
-       skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
-       if (IS_ERR(skb))
-               return skb;
-
-       tpi.flags = filter_tnl_flags(tun_key->tun_flags);
-       tpi.proto = htons(ETH_P_TEB);
-       tpi.key = be64_get_low32(tun_key->tun_id);
-       tpi.seq = 0;
-       gre_build_header(skb, &tpi, tunnel_hlen);
-
-       return skb;
-}
-
-static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
-{
-#ifdef __BIG_ENDIAN
-       return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
-#else
-       return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
-#endif
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_rcv(struct sk_buff *skb,
-                  const struct tnl_ptk_info *tpi)
-{
-       struct ip_tunnel_info tun_info;
-       struct ovs_net *ovs_net;
-       struct vport *vport;
-       __be64 key;
-
-       ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
-       vport = rcu_dereference(ovs_net->vport_net.gre_vport);
-       if (unlikely(!vport))
-               return PACKET_REJECT;
-
-       key = key_to_tunnel_id(tpi->key, tpi->seq);
-       ip_tunnel_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
-                           filter_tnl_flags(tpi->flags), NULL, 0);
-
-       ovs_vport_receive(vport, skb, &tun_info);
-       return PACKET_RCVD;
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_err(struct sk_buff *skb, u32 info,
-                  const struct tnl_ptk_info *tpi)
-{
-       struct ovs_net *ovs_net;
+       struct net *net = ovs_dp_get_net(parms->dp);
+       struct net_device *dev;
        struct vport *vport;
 
-       ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
-       vport = rcu_dereference(ovs_net->vport_net.gre_vport);
-
-       if (unlikely(!vport))
-               return PACKET_REJECT;
-       else
-               return PACKET_RCVD;
-}
-
-static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
-       struct net *net = ovs_dp_get_net(vport->dp);
-       const struct ip_tunnel_key *tun_key;
-       struct flowi4 fl;
-       struct rtable *rt;
-       int min_headroom;
-       int tunnel_hlen;
-       __be16 df;
-       int err;
-
-       if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-               err = -EINVAL;
-               goto err_free_skb;
-       }
-
-       tun_key = &OVS_CB(skb)->egress_tun_info->key;
-       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
-       if (IS_ERR(rt)) {
-               err = PTR_ERR(rt);
-               goto err_free_skb;
-       }
-
-       tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
-
-       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-                       + tunnel_hlen + sizeof(struct iphdr)
-                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-       if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
-               int head_delta = SKB_DATA_ALIGN(min_headroom -
-                                               skb_headroom(skb) +
-                                               16);
-               err = pskb_expand_head(skb, max_t(int, head_delta, 0),
-                                       0, GFP_ATOMIC);
-               if (unlikely(err))
-                       goto err_free_rt;
-       }
-
-       skb = vlan_hwaccel_push_inside(skb);
-       if (unlikely(!skb)) {
-               err = -ENOMEM;
-               goto err_free_rt;
-       }
-
-       /* Push Tunnel header. */
-       skb = __build_header(skb, tunnel_hlen);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
-               skb = NULL;
-               goto err_free_rt;
+       vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
+       if (IS_ERR(vport))
+               return vport;
+
+       rtnl_lock();
+       dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
+       if (IS_ERR(dev)) {
+               rtnl_unlock();
+               ovs_vport_free(vport);
+               return ERR_CAST(dev);
        }
 
-       df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-               htons(IP_DF) : 0;
-
-       skb->ignore_df = 1;
-
-       return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
-                            tun_key->ipv4_dst, IPPROTO_GRE,
-                            tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
-err_free_rt:
-       ip_rt_put(rt);
-err_free_skb:
-       kfree_skb(skb);
-       return err;
-}
-
-static struct gre_cisco_protocol gre_protocol = {
-       .handler        = gre_rcv,
-       .err_handler    = gre_err,
-       .priority       = 1,
-};
-
-static int gre_ports;
-static int gre_init(void)
-{
-       int err;
-
-       gre_ports++;
-       if (gre_ports > 1)
-               return 0;
-
-       err = gre_cisco_register(&gre_protocol);
-       if (err)
-               pr_warn("cannot register gre protocol handler\n");
-
-       return err;
-}
-
-static void gre_exit(void)
-{
-       gre_ports--;
-       if (gre_ports > 0)
-               return;
-
-       gre_cisco_unregister(&gre_protocol);
-}
+       dev_change_flags(dev, dev->flags | IFF_UP);
+       rtnl_unlock();
 
-static const char *gre_get_name(const struct vport *vport)
-{
-       return vport_priv(vport);
+       return vport;
 }
 
 static struct vport *gre_create(const struct vport_parms *parms)
 {
-       struct net *net = ovs_dp_get_net(parms->dp);
-       struct ovs_net *ovs_net;
        struct vport *vport;
-       int err;
-
-       err = gre_init();
-       if (err)
-               return ERR_PTR(err);
-
-       ovs_net = net_generic(net, ovs_net_id);
-       if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
-               vport = ERR_PTR(-EEXIST);
-               goto error;
-       }
 
-       vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
+       vport = gre_tnl_create(parms);
        if (IS_ERR(vport))
-               goto error;
-
-       strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
-       rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
-       return vport;
-
-error:
-       gre_exit();
-       return vport;
-}
-
-static void gre_tnl_destroy(struct vport *vport)
-{
-       struct net *net = ovs_dp_get_net(vport->dp);
-       struct ovs_net *ovs_net;
-
-       ovs_net = net_generic(net, ovs_net_id);
+               return vport;
 
-       RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
-       ovs_vport_deferred_free(vport);
-       gre_exit();
+       return ovs_netdev_link(vport, parms->name);
 }
 
 static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
@@ -288,10 +96,9 @@ static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
 static struct vport_ops ovs_gre_vport_ops = {
        .type           = OVS_VPORT_TYPE_GRE,
        .create         = gre_create,
-       .destroy        = gre_tnl_destroy,
-       .get_name       = gre_get_name,
-       .send           = gre_tnl_send,
+       .send           = ovs_netdev_send,
        .get_egress_tun_info    = gre_get_egress_tun_info,
+       .destroy        = ovs_netdev_tunnel_destroy,
        .owner          = THIS_MODULE,
 };
 
index cddb7069b11b7852093baa0efc67eddb73bf3a2b..a75011505039222c85332b364dec8cd014934f2d 100644 (file)
@@ -57,7 +57,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
        skb_push(skb, ETH_HLEN);
        ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
 
-       ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
+       ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
        return;
 
 error:
@@ -147,7 +147,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
        return ovs_netdev_link(vport, parms->name);
 }
 
-void ovs_vport_free_rcu(struct rcu_head *rcu)
+static void vport_netdev_free(struct rcu_head *rcu)
 {
        struct vport *vport = container_of(rcu, struct vport, rcu);
 
@@ -155,7 +155,6 @@ void ovs_vport_free_rcu(struct rcu_head *rcu)
                dev_put(vport->dev);
        ovs_vport_free(vport);
 }
-EXPORT_SYMBOL_GPL(ovs_vport_free_rcu);
 
 void ovs_netdev_detach_dev(struct vport *vport)
 {
@@ -175,9 +174,25 @@ static void netdev_destroy(struct vport *vport)
                ovs_netdev_detach_dev(vport);
        rtnl_unlock();
 
-       call_rcu(&vport->rcu, ovs_vport_free_rcu);
+       call_rcu(&vport->rcu, vport_netdev_free);
 }
 
+void ovs_netdev_tunnel_destroy(struct vport *vport)
+{
+       rtnl_lock();
+       if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+               ovs_netdev_detach_dev(vport);
+
+       /* Early release so we can unregister the device */
+       dev_put(vport->dev);
+       rtnl_delete_link(vport->dev);
+       vport->dev = NULL;
+       rtnl_unlock();
+
+       call_rcu(&vport->rcu, vport_netdev_free);
+}
+EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
+
 static unsigned int packet_length(const struct sk_buff *skb)
 {
        unsigned int length = skb->len - ETH_HLEN;
index 804412697a90c46f43214d2e80ac447c23aea6ae..497cc81f1aca8d20cf382be4ad759f3e2cacef05 100644 (file)
@@ -29,9 +29,9 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev);
 struct vport *ovs_netdev_link(struct vport *vport, const char *name);
 int ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
-void ovs_vport_free_rcu(struct rcu_head *);
 
 int __init ovs_netdev_init(void);
 void ovs_netdev_exit(void);
 
+void ovs_netdev_tunnel_destroy(struct vport *vport);
 #endif /* vport_netdev.h */
index 547173336cd308567c6815170a7ff542710b5705..1e8b00a23a239c1fa456c614738a3c55ed211a5e 100644 (file)
@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
        int err;
        struct vxlan_config conf = {
                .no_share = true,
-               .flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA,
+               .flags = VXLAN_F_COLLECT_METADATA,
        };
 
        if (!options) {
@@ -146,21 +146,6 @@ static struct vport *vxlan_create(const struct vport_parms *parms)
        return ovs_netdev_link(vport, parms->name);
 }
 
-static void vxlan_destroy(struct vport *vport)
-{
-       rtnl_lock();
-       if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
-               ovs_netdev_detach_dev(vport);
-
-       /* Early release so we can unregister the device */
-       dev_put(vport->dev);
-       rtnl_delete_link(vport->dev);
-       vport->dev = NULL;
-       rtnl_unlock();
-
-       call_rcu(&vport->rcu, ovs_vport_free_rcu);
-}
-
 static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
                                     struct ip_tunnel_info *egress_tun_info)
 {
@@ -183,7 +168,7 @@ static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
 static struct vport_ops ovs_vxlan_netdev_vport_ops = {
        .type                   = OVS_VPORT_TYPE_VXLAN,
        .create                 = vxlan_create,
-       .destroy                = vxlan_destroy,
+       .destroy                = ovs_netdev_tunnel_destroy,
        .get_options            = vxlan_get_options,
        .send                   = ovs_netdev_send,
        .get_egress_tun_info    = vxlan_get_egress_tun_info,
index d14f59403c5eb61cdde91cbe557617194756ccaf..d73e5a16e7ca80b20f5e48820cbe0a723d39e1ad 100644 (file)
@@ -603,9 +603,9 @@ int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
         * saddr, tp_src and tp_dst
         */
        __ip_tunnel_info_init(egress_tun_info,
-                             fl.saddr, tun_key->ipv4_dst,
-                             tun_key->ipv4_tos,
-                             tun_key->ipv4_ttl,
+                             fl.saddr, tun_key->u.ipv4.dst,
+                             tun_key->tos,
+                             tun_key->ttl,
                              tp_src, tp_dst,
                              tun_key->tun_id,
                              tun_key->tun_flags,
index 1a689c28b5a6356dc1ffe061c80fc537822a4e7b..b88b3ee86f079beb2b63269cbfe8e97067f6d1e3 100644 (file)
@@ -254,9 +254,9 @@ static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
        struct rtable *rt;
 
        memset(fl, 0, sizeof(*fl));
-       fl->daddr = key->ipv4_dst;
-       fl->saddr = key->ipv4_src;
-       fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+       fl->daddr = key->u.ipv4.dst;
+       fl->saddr = key->u.ipv4.src;
+       fl->flowi4_tos = RT_TOS(key->tos);
        fl->flowi4_mark = mark;
        fl->flowi4_proto = protocol;
 
index b5afe538bb88e9b97eec2faa4e93acbb16be4fa0..7b8e39a223879c3cb0a9d2a0bc6ff003118379f9 100644 (file)
@@ -92,6 +92,7 @@
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
 #endif
+#include <linux/bpf.h>
 
 #include "internal.h"
 
@@ -1410,6 +1411,22 @@ static unsigned int fanout_demux_qm(struct packet_fanout *f,
        return skb_get_queue_mapping(skb) % num;
 }
 
+static unsigned int fanout_demux_bpf(struct packet_fanout *f,
+                                    struct sk_buff *skb,
+                                    unsigned int num)
+{
+       struct bpf_prog *prog;
+       unsigned int ret = 0;
+
+       rcu_read_lock();
+       prog = rcu_dereference(f->bpf_prog);
+       if (prog)
+               ret = BPF_PROG_RUN(prog, skb) % num;
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
 {
        return f->flags & (flag >> 8);
@@ -1454,6 +1471,10 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
        case PACKET_FANOUT_ROLLOVER:
                idx = fanout_demux_rollover(f, skb, 0, false, num);
                break;
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
+               idx = fanout_demux_bpf(f, skb, num);
+               break;
        }
 
        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
@@ -1502,6 +1523,103 @@ static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
        return false;
 }
 
+static void fanout_init_data(struct packet_fanout *f)
+{
+       switch (f->type) {
+       case PACKET_FANOUT_LB:
+               atomic_set(&f->rr_cur, 0);
+               break;
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
+               RCU_INIT_POINTER(f->bpf_prog, NULL);
+               break;
+       }
+}
+
+static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
+{
+       struct bpf_prog *old;
+
+       spin_lock(&f->lock);
+       old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
+       rcu_assign_pointer(f->bpf_prog, new);
+       spin_unlock(&f->lock);
+
+       if (old) {
+               synchronize_net();
+               bpf_prog_destroy(old);
+       }
+}
+
+static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+                               unsigned int len)
+{
+       struct bpf_prog *new;
+       struct sock_fprog fprog;
+       int ret;
+
+       if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+               return -EPERM;
+       if (len != sizeof(fprog))
+               return -EINVAL;
+       if (copy_from_user(&fprog, data, len))
+               return -EFAULT;
+
+       ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+       if (ret)
+               return ret;
+
+       __fanout_set_data_bpf(po->fanout, new);
+       return 0;
+}
+
+static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+                               unsigned int len)
+{
+       struct bpf_prog *new;
+       u32 fd;
+
+       if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+               return -EPERM;
+       if (len != sizeof(fd))
+               return -EINVAL;
+       if (copy_from_user(&fd, data, len))
+               return -EFAULT;
+
+       new = bpf_prog_get(fd);
+       if (IS_ERR(new))
+               return PTR_ERR(new);
+       if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
+               bpf_prog_put(new);
+               return -EINVAL;
+       }
+
+       __fanout_set_data_bpf(po->fanout, new);
+       return 0;
+}
+
+static int fanout_set_data(struct packet_sock *po, char __user *data,
+                          unsigned int len)
+{
+       switch (po->fanout->type) {
+       case PACKET_FANOUT_CBPF:
+               return fanout_set_data_cbpf(po, data, len);
+       case PACKET_FANOUT_EBPF:
+               return fanout_set_data_ebpf(po, data, len);
+       default:
+               return -EINVAL;
+       }
+}
+
+static void fanout_release_data(struct packet_fanout *f)
+{
+       switch (f->type) {
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
+               __fanout_set_data_bpf(f, NULL);
+       }
+}
+
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
        struct packet_sock *po = pkt_sk(sk);
@@ -1519,6 +1637,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        case PACKET_FANOUT_CPU:
        case PACKET_FANOUT_RND:
        case PACKET_FANOUT_QM:
+       case PACKET_FANOUT_CBPF:
+       case PACKET_FANOUT_EBPF:
                break;
        default:
                return -EINVAL;
@@ -1561,10 +1681,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                match->id = id;
                match->type = type;
                match->flags = flags;
-               atomic_set(&match->rr_cur, 0);
                INIT_LIST_HEAD(&match->list);
                spin_lock_init(&match->lock);
                atomic_set(&match->sk_ref, 0);
+               fanout_init_data(match);
                match->prot_hook.type = po->prot_hook.type;
                match->prot_hook.dev = po->prot_hook.dev;
                match->prot_hook.func = packet_rcv_fanout;
@@ -1610,6 +1730,7 @@ static void fanout_release(struct sock *sk)
        if (atomic_dec_and_test(&f->sk_ref)) {
                list_del(&f->list);
                dev_remove_pack(&f->prot_hook);
+               fanout_release_data(f);
                kfree(f);
        }
        mutex_unlock(&fanout_mutex);
@@ -3529,6 +3650,13 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                return fanout_add(sk, val & 0xffff, val >> 16);
        }
+       case PACKET_FANOUT_DATA:
+       {
+               if (!po->fanout)
+                       return -EINVAL;
+
+               return fanout_set_data(po, optval, optlen);
+       }
        case PACKET_TX_HAS_OFF:
        {
                unsigned int val;
index e20b3e8829b8acac25b74d7bbc23636b089d1ff4..9ee46314b7d76df47d683c252a92ce97398d592b 100644 (file)
@@ -79,7 +79,10 @@ struct packet_fanout {
        u16                     id;
        u8                      type;
        u8                      flags;
-       atomic_t                rr_cur;
+       union {
+               atomic_t                rr_cur;
+               struct bpf_prog __rcu   *bpf_prog;
+       };
        struct list_head        list;
        struct sock             *arr[PACKET_FANOUT_MAX];
        spinlock_t              lock;
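
The af_packet.c and internal.h hunks above add two BPF-driven fanout modes: a classic or extended BPF program is attached to an existing fanout group, and fanout_demux_bpf() delivers each packet to the socket indexed by the program's return value modulo the group size. A minimal userspace sketch of the classic-BPF path, assuming a kernel carrying this patch and uapi headers that define PACKET_FANOUT_CBPF and PACKET_FANOUT_DATA (the socket fd, group id, and first-byte program are illustrative only):

#include <errno.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_packet.h>

/* Steer on the first packet byte; the kernel uses the program's
 * return value modulo the number of sockets in the group. */
static struct sock_filter insns[] = {
    BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0),  /* A = pkt[0] */
    BPF_STMT(BPF_RET | BPF_A, 0),           /* return A */
};

/* fd must be a bound AF_PACKET socket; group_id is a 16-bit id */
int join_cbpf_fanout(int fd, int group_id)
{
    struct sock_fprog fprog = {
        .len    = sizeof(insns) / sizeof(insns[0]),
        .filter = insns,
    };
    int arg = group_id | (PACKET_FANOUT_CBPF << 16);

    if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)))
        return -errno;
    /* the group must exist before a program can be installed on it */
    if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
                   &fprog, sizeof(fprog)))
        return -errno;
    return 0;
}

For PACKET_FANOUT_EBPF, the same PACKET_FANOUT_DATA option instead takes a u32 file descriptor of an already-loaded BPF_PROG_TYPE_SOCKET_FILTER program, matching fanout_set_data_ebpf() above.
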
index 4ebd29c128b63f7c9519a421128351b5e502853a..dd666fb9b4e1761a50fcf1fdf91ef508b55c4aca 100644 (file)
@@ -185,7 +185,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                ret = 0;
                goto out;
        }
-       trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
+       trans = rds_trans_get_preferred(sock_net(sock->sk),
+                                       sin->sin_addr.s_addr);
        if (!trans) {
                ret = -EADDRNOTAVAIL;
                rds_remove_bound(rs);
index da6da57e5f36b5cc13a5bc92abfedb6a5ccea45d..d4fecb21ca2541f856b015b51a5fd24367953043 100644 (file)
@@ -117,7 +117,8 @@ static void rds_conn_reset(struct rds_connection *conn)
  * For now they are not garbage collected once they're created.  They
  * are torn down as the module is removed, if ever.
  */
-static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+static struct rds_connection *__rds_conn_create(struct net *net,
+                                               __be32 laddr, __be32 faddr,
                                       struct rds_transport *trans, gfp_t gfp,
                                       int is_outgoing)
 {
@@ -157,6 +158,7 @@ new_conn:
        conn->c_faddr = faddr;
        spin_lock_init(&conn->c_lock);
        conn->c_next_tx_seq = 1;
+       rds_conn_net_set(conn, net);
 
        init_waitqueue_head(&conn->c_waitq);
        INIT_LIST_HEAD(&conn->c_send_queue);
@@ -174,7 +176,7 @@ new_conn:
         * can bind to the destination address then we'd rather the messages
         * flow through loopback rather than either transport.
         */
-       loop_trans = rds_trans_get_preferred(faddr);
+       loop_trans = rds_trans_get_preferred(net, faddr);
        if (loop_trans) {
                rds_trans_put(loop_trans);
                conn->c_loopback = 1;
@@ -260,17 +262,19 @@ out:
        return conn;
 }
 
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+                                      __be32 laddr, __be32 faddr,
                                       struct rds_transport *trans, gfp_t gfp)
 {
-       return __rds_conn_create(laddr, faddr, trans, gfp, 0);
+       return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(rds_conn_create);
 
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+                                               __be32 laddr, __be32 faddr,
                                       struct rds_transport *trans, gfp_t gfp)
 {
-       return __rds_conn_create(laddr, faddr, trans, gfp, 1);
+       return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
 }
 EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
 
index ba2dffeff60876ca669993d1863dcbb6cb76a740..13814227b3b235e4b9c924c1eab03859b04cc1f5 100644 (file)
@@ -317,7 +317,7 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len,
  * allowed to influence which paths have priority.  We could call userspace
  * asserting this policy "routing".
  */
-static int rds_ib_laddr_check(__be32 addr)
+static int rds_ib_laddr_check(struct net *net, __be32 addr)
 {
        int ret;
        struct rdma_cm_id *cm_id;
index 0da2a45b33bd8ee5df6915e253440d3bc65081fc..f40d8f52b75398559cc36c6c236d83d0e1a5b653 100644 (file)
@@ -448,8 +448,9 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                 (unsigned long long)be64_to_cpu(lguid),
                 (unsigned long long)be64_to_cpu(fguid));
 
-       conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
-                              GFP_KERNEL);
+       /* RDS/IB is not currently netns aware, thus init_net */
+       conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+                              &rds_ib_transport, GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
index 9a6b4f66187cf3e5ab533cd01344c9856834ebb7..140a44a5f7b7f1c08b3f329707b72fc75a9a81fe 100644 (file)
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 
        /* check for all kinds of wrapping and the like */
        start = (unsigned long)optval;
-       if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+       if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
                ret = -EINVAL;
                goto out;
        }
index 589935661d667d81b2f6159eb69c237f95329a63..5d5a9d258658258ebf5579a6c47fb1149c784ab1 100644 (file)
@@ -218,7 +218,7 @@ static void rds_iw_ic_info(struct socket *sock, unsigned int len,
  * allowed to influence which paths have priority.  We could call userspace
  * asserting this policy "routing".
  */
-static int rds_iw_laddr_check(__be32 addr)
+static int rds_iw_laddr_check(struct net *net, __be32 addr)
 {
        int ret;
        struct rdma_cm_id *cm_id;
index 8f486fa3207901895e5184f50eb55563b6628e30..a6553a6fb2bc2e5053152b620851790379673d40 100644 (file)
@@ -398,8 +398,9 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
                 &dp->dp_saddr, &dp->dp_daddr,
                 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
 
-       conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
-                              GFP_KERNEL);
+       /* RDS/IW is not currently netns aware, thus init_net */
+       conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+                              &rds_iw_transport, GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
index 2260c1e434b19835b776db41f5a978be4ad6e9b2..9005fb0586f627ce0b47783a71b98897f49538fd 100644 (file)
@@ -128,8 +128,21 @@ struct rds_connection {
 
        /* Protocol version */
        unsigned int            c_version;
+       possible_net_t          c_net;
 };
 
+static inline
+struct net *rds_conn_net(struct rds_connection *conn)
+{
+       return read_pnet(&conn->c_net);
+}
+
+static inline
+void rds_conn_net_set(struct rds_connection *conn, struct net *net)
+{
+       write_pnet(&conn->c_net, net);
+}
+
 #define RDS_FLAG_CONG_BITMAP   0x01
 #define RDS_FLAG_ACK_REQUIRED  0x02
 #define RDS_FLAG_RETRANSMITTED 0x04
@@ -417,7 +430,7 @@ struct rds_transport {
        unsigned int            t_prefer_loopback:1;
        unsigned int            t_type;
 
-       int (*laddr_check)(__be32 addr);
+       int (*laddr_check)(struct net *net, __be32 addr);
        int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
        void (*conn_free)(void *data);
        int (*conn_connect)(struct rds_connection *conn);
@@ -608,9 +621,11 @@ struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
 /* conn.c */
 int rds_conn_init(void);
 void rds_conn_exit(void);
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+                                      __be32 laddr, __be32 faddr,
                                       struct rds_transport *trans, gfp_t gfp);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+                                               __be32 laddr, __be32 faddr,
                               struct rds_transport *trans, gfp_t gfp);
 void rds_conn_shutdown(struct rds_connection *conn);
 void rds_conn_destroy(struct rds_connection *conn);
@@ -795,7 +810,7 @@ void rds_connect_complete(struct rds_connection *conn);
 /* transport.c */
 int rds_trans_register(struct rds_transport *trans);
 void rds_trans_unregister(struct rds_transport *trans);
-struct rds_transport *rds_trans_get_preferred(__be32 addr);
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
 void rds_trans_put(struct rds_transport *trans);
 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
                                       unsigned int avail);
index e9430f537f9c2bb23bbaeeb66933e1e85058bd34..2581b8e3dbe70b70a73bac2511c75dcb1efba85f 100644 (file)
@@ -1023,7 +1023,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
                conn = rs->rs_conn;
        else {
-               conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
+               conn = rds_conn_create_outgoing(sock_net(sock->sk),
+                                               rs->rs_bound_addr, daddr,
                                        rs->rs_transport,
                                        sock->sk->sk_allocation);
                if (IS_ERR(conn)) {
index edac9ef2bc8b1c2060a2030deb4225d14c2fcc89..c42b60bf4c68eb26f7364df7e045c40847ea3be3 100644 (file)
@@ -35,6 +35,9 @@
 #include <linux/in.h>
 #include <linux/module.h>
 #include <net/tcp.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/tcp.h>
 
 #include "rds.h"
 #include "tcp.h"
@@ -189,9 +192,9 @@ out:
        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
 }
 
-static int rds_tcp_laddr_check(__be32 addr)
+static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 {
-       if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
+       if (inet_addr_type(net, addr) == RTN_LOCAL)
                return 0;
        return -EADDRNOTAVAIL;
 }
@@ -250,16 +253,7 @@ static void rds_tcp_destroy_conns(void)
        }
 }
 
-static void rds_tcp_exit(void)
-{
-       rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
-       rds_tcp_listen_stop();
-       rds_tcp_destroy_conns();
-       rds_trans_unregister(&rds_tcp_transport);
-       rds_tcp_recv_exit();
-       kmem_cache_destroy(rds_tcp_conn_slab);
-}
-module_exit(rds_tcp_exit);
+static void rds_tcp_exit(void);
 
 struct rds_transport rds_tcp_transport = {
        .laddr_check            = rds_tcp_laddr_check,
@@ -281,6 +275,136 @@ struct rds_transport rds_tcp_transport = {
        .t_prefer_loopback      = 1,
 };
 
+static int rds_tcp_netid;
+
+/* per-network namespace private data for this module */
+struct rds_tcp_net {
+       struct socket *rds_tcp_listen_sock;
+       struct work_struct rds_tcp_accept_w;
+};
+
+static void rds_tcp_accept_worker(struct work_struct *work)
+{
+       struct rds_tcp_net *rtn = container_of(work,
+                                              struct rds_tcp_net,
+                                              rds_tcp_accept_w);
+
+       while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
+               cond_resched();
+}
+
+void rds_tcp_accept_work(struct sock *sk)
+{
+       struct net *net = sock_net(sk);
+       struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+       queue_work(rds_wq, &rtn->rds_tcp_accept_w);
+}
+
+static __net_init int rds_tcp_init_net(struct net *net)
+{
+       struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+       rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
+       if (!rtn->rds_tcp_listen_sock) {
+               pr_warn("could not set up listen sock\n");
+               return -EAFNOSUPPORT;
+       }
+       INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
+       return 0;
+}
+
+static void __net_exit rds_tcp_exit_net(struct net *net)
+{
+       struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+       /* If rds_tcp_exit_net() is called as a result of netns deletion,
+        * the rds_tcp_kill_sock() device notifier would already have cleaned
+        * up the listen socket, thus there is no work to do in this function.
+        *
+        * If rds_tcp_exit_net() is called as a result of module unload,
+        * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
+        * we do need to clean up the listen socket here.
+        */
+       if (rtn->rds_tcp_listen_sock) {
+               rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+               rtn->rds_tcp_listen_sock = NULL;
+               flush_work(&rtn->rds_tcp_accept_w);
+       }
+}
+
+static struct pernet_operations rds_tcp_net_ops = {
+       .init = rds_tcp_init_net,
+       .exit = rds_tcp_exit_net,
+       .id = &rds_tcp_netid,
+       .size = sizeof(struct rds_tcp_net),
+};
+
+static void rds_tcp_kill_sock(struct net *net)
+{
+       struct rds_tcp_connection *tc, *_tc;
+       struct sock *sk;
+       LIST_HEAD(tmp_list);
+       struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+       rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+       rtn->rds_tcp_listen_sock = NULL;
+       flush_work(&rtn->rds_tcp_accept_w);
+       spin_lock_irq(&rds_tcp_conn_lock);
+       list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+               struct net *c_net = read_pnet(&tc->conn->c_net);
+
+               if (net != c_net || !tc->t_sock)
+                       continue;
+               list_move_tail(&tc->t_tcp_node, &tmp_list);
+       }
+       spin_unlock_irq(&rds_tcp_conn_lock);
+       list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+               sk = tc->t_sock->sk;
+               sk->sk_prot->disconnect(sk, 0);
+               tcp_done(sk);
+               if (tc->conn->c_passive)
+                       rds_conn_destroy(tc->conn->c_passive);
+               rds_conn_destroy(tc->conn);
+       }
+}
+
+static int rds_tcp_dev_event(struct notifier_block *this,
+                            unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       /* rds-tcp registers as a pernet subsys, so the ->exit will only
+        * get invoked after network activity has quiesced. We need to
+        * clean up all sockets to quiesce network activity, and use
+        * the unregistration of the per-net loopback device as a trigger
+        * to start that cleanup.
+        */
+       if (event == NETDEV_UNREGISTER_FINAL &&
+           dev->ifindex == LOOPBACK_IFINDEX)
+               rds_tcp_kill_sock(dev_net(dev));
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rds_tcp_dev_notifier = {
+       .notifier_call        = rds_tcp_dev_event,
+       .priority = -10, /* must be called after other network notifiers */
+};
+
+static void rds_tcp_exit(void)
+{
+       rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+       unregister_pernet_subsys(&rds_tcp_net_ops);
+       if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+               pr_warn("could not unregister rds_tcp_dev_notifier\n");
+       rds_tcp_destroy_conns();
+       rds_trans_unregister(&rds_tcp_transport);
+       rds_tcp_recv_exit();
+       kmem_cache_destroy(rds_tcp_conn_slab);
+}
+module_exit(rds_tcp_exit);
+
 static int rds_tcp_init(void)
 {
        int ret;
@@ -293,6 +417,16 @@ static int rds_tcp_init(void)
                goto out;
        }
 
+       ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+       if (ret) {
+               pr_warn("could not register rds_tcp_dev_notifier\n");
+               goto out;
+       }
+
+       ret = register_pernet_subsys(&rds_tcp_net_ops);
+       if (ret)
+               goto out_slab;
+
        ret = rds_tcp_recv_init();
        if (ret)
                goto out_slab;
@@ -301,19 +435,14 @@ static int rds_tcp_init(void)
        if (ret)
                goto out_recv;
 
-       ret = rds_tcp_listen_init();
-       if (ret)
-               goto out_register;
-
        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
 
        goto out;
 
-out_register:
-       rds_trans_unregister(&rds_tcp_transport);
 out_recv:
        rds_tcp_recv_exit();
 out_slab:
+       unregister_pernet_subsys(&rds_tcp_net_ops);
        kmem_cache_destroy(rds_tcp_conn_slab);
 out:
        return ret;
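
The rds/tcp.c changes above replace the module's single global listen socket with per-namespace state: register_pernet_subsys() carves a struct rds_tcp_net out of every struct net, and the loopback-device notifier tears the per-netns sockets down on namespace exit. The pernet plumbing follows the kernel's standard idiom; a hypothetical self-contained module using the same pattern (all names illustrative, none from the patch):

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* hypothetical per-netns state, mirroring the rds_tcp_net layout */
struct demo_net {
    int counter;
};

static int demo_netid;  /* assigned by register_pernet_subsys() */

static __net_init int demo_init_net(struct net *net)
{
    struct demo_net *dn = net_generic(net, demo_netid);

    dn->counter = 0;    /* the .size allocation arrives zeroed */
    return 0;
}

static void __net_exit demo_exit_net(struct net *net)
{
    /* release any per-namespace resources here */
}

static struct pernet_operations demo_net_ops = {
    .init = demo_init_net,
    .exit = demo_exit_net,
    .id   = &demo_netid,
    .size = sizeof(struct demo_net),
};

static int __init demo_init(void)
{
    /* .init runs for every existing netns and every one created later */
    return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
    unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

That .init-for-every-namespace behaviour is what lets rds-tcp bring up one listen socket per netns without tracking namespaces by hand.
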
index 0dbdd37162da34ea33eddbd610d90aa78b32b93c..64f873c0c6b6d15574ec9771506d04e617a6aaac 100644 (file)
@@ -52,6 +52,7 @@ u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
+void rds_tcp_accept_work(struct sock *sk);
 
 /* tcp_connect.c */
 int rds_tcp_conn_connect(struct rds_connection *conn);
@@ -59,9 +60,11 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
 void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
-int rds_tcp_listen_init(void);
-void rds_tcp_listen_stop(void);
+struct socket *rds_tcp_listen_init(struct net *);
+void rds_tcp_listen_stop(struct socket *);
 void rds_tcp_listen_data_ready(struct sock *sk);
+int rds_tcp_accept_one(struct socket *sock);
+int rds_tcp_keepalive(struct socket *sock);
 
 /* tcp_recv.c */
 int rds_tcp_recv_init(void);
index 973109c7b8e86f21bec783eb9e4e118e6e8ebb8b..5cb16875c4603dba71c733de6600ada40e39cffc 100644 (file)
@@ -79,7 +79,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        struct sockaddr_in src, dest;
        int ret;
 
-       ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+       ret = sock_create_kern(rds_conn_net(conn), PF_INET,
+                              SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
                goto out;
 
@@ -111,10 +112,12 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
        if (ret == -EINPROGRESS)
                ret = 0;
-       if (ret == 0)
+       if (ret == 0) {
+               rds_tcp_keepalive(sock);
                sock = NULL;
-       else
+       } else {
                rds_tcp_restore_callbacks(sock, conn->c_transport_data);
+       }
 
 out:
        if (sock)
index 0da49e34495f1e974bf466b83ba1144ec8965f61..444d78d0bd77c0ddc95daf99a22fc5983ea95bb6 100644 (file)
 #include "rds.h"
 #include "tcp.h"
 
-/*
- * cheesy, but simple..
- */
-static void rds_tcp_accept_worker(struct work_struct *work);
-static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
-static struct socket *rds_tcp_listen_sock;
-
-static int rds_tcp_keepalive(struct socket *sock)
+int rds_tcp_keepalive(struct socket *sock)
 {
        /* values below based on xs_udp_default_timeout */
        int keepidle = 5; /* send a probe 'keepidle' secs after last data */
@@ -77,7 +70,7 @@ bail:
        return ret;
 }
 
-static int rds_tcp_accept_one(struct socket *sock)
+int rds_tcp_accept_one(struct socket *sock)
 {
        struct socket *new_sock = NULL;
        struct rds_connection *conn;
@@ -85,8 +78,9 @@ static int rds_tcp_accept_one(struct socket *sock)
        struct inet_sock *inet;
        struct rds_tcp_connection *rs_tcp;
 
-       ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
-                              sock->sk->sk_protocol, &new_sock);
+       ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+                              sock->sk->sk_type, sock->sk->sk_protocol,
+                              &new_sock);
        if (ret)
                goto out;
 
@@ -108,7 +102,8 @@ static int rds_tcp_accept_one(struct socket *sock)
                 &inet->inet_saddr, ntohs(inet->inet_sport),
                 &inet->inet_daddr, ntohs(inet->inet_dport));
 
-       conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
+       conn = rds_conn_create(sock_net(sock->sk),
+                              inet->inet_saddr, inet->inet_daddr,
                               &rds_tcp_transport, GFP_KERNEL);
        if (IS_ERR(conn)) {
                ret = PTR_ERR(conn);
@@ -148,12 +143,6 @@ out:
        return ret;
 }
 
-static void rds_tcp_accept_worker(struct work_struct *work)
-{
-       while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
-               cond_resched();
-}
-
 void rds_tcp_listen_data_ready(struct sock *sk)
 {
        void (*ready)(struct sock *sk);
@@ -174,20 +163,20 @@ void rds_tcp_listen_data_ready(struct sock *sk)
         * socket
         */
        if (sk->sk_state == TCP_LISTEN)
-               queue_work(rds_wq, &rds_tcp_listen_work);
+               rds_tcp_accept_work(sk);
 
 out:
        read_unlock(&sk->sk_callback_lock);
        ready(sk);
 }
 
-int rds_tcp_listen_init(void)
+struct socket *rds_tcp_listen_init(struct net *net)
 {
        struct sockaddr_in sin;
        struct socket *sock = NULL;
        int ret;
 
-       ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+       ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
                goto out;
 
@@ -211,17 +200,15 @@ int rds_tcp_listen_init(void)
        if (ret < 0)
                goto out;
 
-       rds_tcp_listen_sock = sock;
-       sock = NULL;
+       return sock;
 out:
        if (sock)
                sock_release(sock);
-       return ret;
+       return NULL;
 }
 
-void rds_tcp_listen_stop(void)
+void rds_tcp_listen_stop(struct socket *sock)
 {
-       struct socket *sock = rds_tcp_listen_sock;
        struct sock *sk;
 
        if (!sock)
@@ -242,5 +229,4 @@ void rds_tcp_listen_stop(void)
        /* wait for accepts to stop and close the socket */
        flush_workqueue(rds_wq);
        sock_release(sock);
-       rds_tcp_listen_sock = NULL;
 }
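
With the hunks above, rds_tcp_listen_init() takes the namespace explicitly and returns the socket rather than stashing it in a file-scope global; sock_create_kern() pins the new socket to the given netns. Stripped of the RDS specifics, the namespace-aware kernel listen-socket pattern looks roughly like this sketch (port and backlog are illustrative; sock_create_kern() is the netns-taking signature used by this series):

#include <linux/net.h>
#include <linux/in.h>
#include <net/sock.h>

static struct socket *demo_listen_init(struct net *net, __be16 port)
{
    struct sockaddr_in sin = {
        .sin_family      = AF_INET,
        .sin_addr.s_addr = htonl(INADDR_ANY),
        .sin_port        = port,
    };
    struct socket *sock;
    int ret;

    /* create the socket inside the caller's namespace */
    ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
    if (ret < 0)
        return NULL;

    ret = kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
    if (ret < 0)
        goto err;

    ret = kernel_listen(sock, 64);
    if (ret < 0)
        goto err;

    return sock;
err:
    sock_release(sock);
    return NULL;
}

Returning the socket (or NULL) instead of an errno mirrors the new rds_tcp_listen_init() contract, which lets rds_tcp_init_net() store the result directly in its per-netns struct.
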
index 83498e1c75b83f3230dace76da031a6f46753ee9..f3afd1d60d3c7a7880993554c6375754fb4c6ff8 100644 (file)
@@ -77,7 +77,7 @@ void rds_trans_put(struct rds_transport *trans)
                module_put(trans->t_owner);
 }
 
-struct rds_transport *rds_trans_get_preferred(__be32 addr)
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr)
 {
        struct rds_transport *ret = NULL;
        struct rds_transport *trans;
@@ -90,7 +90,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr)
        for (i = 0; i < RDS_TRANS_COUNT; i++) {
                trans = transports[i];
 
-               if (trans && (trans->laddr_check(addr) == 0) &&
+               if (trans && (trans->laddr_check(net, addr) == 0) &&
                    (!trans->t_owner || try_module_get(trans->t_owner))) {
                        ret = trans;
                        break;
index 4c10e7e6c9f6ae53291d3f72d82b6c9b0539b552..598d374f6a35f714db4753efa7ab28d6782419a9 100644 (file)
@@ -36,7 +36,8 @@ config RFKILL_REGULATOR
 
 config RFKILL_GPIO
        tristate "GPIO RFKILL driver"
-       depends on RFKILL && GPIOLIB
+       depends on RFKILL
+       depends on GPIOLIB || COMPILE_TEST
        default n
        help
          If you say yes here you get support of a generic gpio RFKILL
index d5d58d9195524f36b03bdac0125148cb7110901a..93127220cb54ac7ffdc78890a3753031652b8139 100644 (file)
@@ -164,7 +164,6 @@ static int rfkill_gpio_remove(struct platform_device *pdev)
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
        { "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
-       { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E40", RFKILL_TYPE_BLUETOOTH },
        { "BCM2E64", RFKILL_TYPE_BLUETOOTH },
index f2b540220ad02f1f8e3b2add9c7477a334081c3d..5019a47b9270e758f65c346631becc99e933b0c6 100644 (file)
@@ -37,6 +37,7 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
        struct nf_conntrack_tuple tuple;
        enum ip_conntrack_info ctinfo;
        struct tcf_connmark_info *ca = a->priv;
+       struct nf_conntrack_zone zone;
        struct nf_conn *c;
        int proto;
 
@@ -70,7 +71,10 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
                               proto, &tuple))
                goto out;
 
-       thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+       zone.id = ca->zone;
+       zone.dir = NF_CT_DEFAULT_ZONE_DIR;
+
+       thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
        if (!thash)
                goto out;
 
index 19cd8904efa0a46b9d659f36322a78e4cbb64c38..2d1be4a760fdc4361f23d0aa93a861298eaafe45 100644 (file)
@@ -101,6 +101,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        return ret;
                ret = ACT_P_CREATED;
        } else {
+               if (bind)
+                       return 0;
                if (!ovr) {
                        tcf_hash_release(a, bind);
                        return -EEXIST;
index 5be0b3c1c5b0c9f17e3fbd4e1dc1c92c7a8e5aed..b7c4ead8b5a8e863d87f8c1c4c1e37840fe50577 100644 (file)
@@ -162,7 +162,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                        goto drop;
 
                tcph = (void *)(skb_network_header(skb) + ihl);
-               inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
+               inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
+                                        true);
                break;
        }
        case IPPROTO_UDP:
@@ -178,7 +179,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                udph = (void *)(skb_network_header(skb) + ihl);
                if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                        inet_proto_csum_replace4(&udph->check, skb, addr,
-                                                new_addr, 1);
+                                                new_addr, true);
                        if (!udph->check)
                                udph->check = CSUM_MANGLED_0;
                }
@@ -231,7 +232,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                        iph->saddr = new_addr;
 
                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-                                        0);
+                                        false);
                break;
        }
        default:
index 2e2398cfc694aaf7ed12c2afda377e8ded340cb2..2177eac0a61ed00c6c60655f577e0dd816fd2c08 100644 (file)
@@ -54,7 +54,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
        bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
        if (opt == NULL) {
-               u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
+               u32 limit = qdisc_dev(sch)->tx_queue_len;
 
                if (is_bfifo)
                        limit *= psched_mtu(qdisc_dev(sch));
index 21ca33c9f0368b21cdb00fbdbbca4851c2ad87a2..a9ba030435a2a5c2ca4bea21d62c6d631c6168f4 100644 (file)
@@ -288,10 +288,26 @@ begin:
 
 static void fq_codel_reset(struct Qdisc *sch)
 {
-       struct sk_buff *skb;
+       struct fq_codel_sched_data *q = qdisc_priv(sch);
+       int i;
 
-       while ((skb = fq_codel_dequeue(sch)) != NULL)
-               kfree_skb(skb);
+       INIT_LIST_HEAD(&q->new_flows);
+       INIT_LIST_HEAD(&q->old_flows);
+       for (i = 0; i < q->flows_cnt; i++) {
+               struct fq_codel_flow *flow = q->flows + i;
+
+               while (flow->head) {
+                       struct sk_buff *skb = dequeue_head(flow);
+
+                       qdisc_qstats_backlog_dec(sch, skb);
+                       kfree_skb(skb);
+               }
+
+               INIT_LIST_HEAD(&flow->flowchain);
+               codel_vars_init(&flow->cvars);
+       }
+       memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+       sch->q.qlen = 0;
 }
 
 static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
index 6efca30894aad4294824d2ba64f732239a921517..942fea8405a476a3f4e23db9a826b8b5fe5f4ecd 100644 (file)
@@ -735,7 +735,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 {
        struct Qdisc *qdisc = &noqueue_qdisc;
 
-       if (dev->tx_queue_len) {
+       if (dev->tx_queue_len && !(dev->priv_flags & IFF_NO_QUEUE)) {
                qdisc = qdisc_create_dflt(dev_queue,
                                          default_qdisc_ops, TC_H_ROOT);
                if (!qdisc) {
@@ -755,7 +755,9 @@ static void attach_default_qdiscs(struct net_device *dev)
 
        txq = netdev_get_tx_queue(dev, 0);
 
-       if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+       if (!netif_is_multiqueue(dev) ||
+           dev->tx_queue_len == 0 ||
+           dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
index abb9f2fec28fbd435ff89a17eb7e1937cb4c19b3..80105109f756315d7a31d6734887941c9f16ea9c 100644 (file)
@@ -512,11 +512,9 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt)
 
        if (tb[TCA_GRED_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-       else {
-               u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
-
-               sch->limit = qlen * psched_mtu(qdisc_dev(sch));
-       }
+       else
+               sch->limit = qdisc_dev(sch)->tx_queue_len
+                            * psched_mtu(qdisc_dev(sch));
 
        return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
index f1acb0f60dc35b724289f98498b5cf2162583ce9..cf4b0f865d1bc6a87873b54bb89cfe8e28f36ea6 100644 (file)
@@ -1048,11 +1048,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
        if (tb[TCA_HTB_DIRECT_QLEN])
                q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
-       else {
+       else
                q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
-               if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
-                       q->direct_qlen = 2;
-       }
+
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;
index ade9445a55abe468107f2bcb233e48da027afb4b..5abfe44678d4a1ecda495bf9e00879c649c63cdb 100644 (file)
@@ -130,12 +130,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
        q->unplug_indefinite = false;
 
        if (opt == NULL) {
-               /* We will set a default limit of 100 pkts (~150kB)
-                * in case tx_queue_len is not available. The
-                * default value is completely arbitrary.
-                */
-               u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
-               q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+               q->limit = qdisc_dev(sch)->tx_queue_len
+                          * psched_mtu(qdisc_dev(sch));
        } else {
                struct tc_plug_qopt *ctl = nla_data(opt);
 
index 4b815193326c9abae464c05d53609c28c2071b38..dcdff5c769a1c28e6ea476ce822c8fbe74f9fc53 100644 (file)
@@ -502,7 +502,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 
        limit = ctl->limit;
        if (limit == 0)
-               limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+               limit = qdisc_dev(sch)->tx_queue_len;
 
        child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
        if (IS_ERR(child))
index 33bafa2e703e299f3b423d1f95b2a21cf177c634..16c1c43980a12dbe11832ad61777333446b69eea 100644 (file)
@@ -810,7 +810,7 @@ static int switchdev_port_fdb_dump_cb(struct net_device *dev,
        ndm->ndm_flags   = NTF_SELF;
        ndm->ndm_type    = 0;
        ndm->ndm_ifindex = dev->ifindex;
-       ndm->ndm_state   = NUD_REACHABLE;
+       ndm->ndm_state   = obj->u.fdb.ndm_state;
 
        if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
                goto nla_put_failure;
index f067e5425560fe0d43c184589a397d614c12573b..75db07c78a6900157c0b568491a5d4e8e694e274 100644 (file)
@@ -351,11 +351,11 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
+               case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
-               case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
@@ -1330,6 +1330,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        u16 peers_snd_nxt =  msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
+       u16 rcv_nxt = l->rcv_nxt;
        char *if_name;
        int rc = 0;
 
@@ -1393,7 +1394,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                        break;
 
                /* Send NACK if peer has sent pkts we haven't received yet */
-               if (more(peers_snd_nxt, l->rcv_nxt))
+               if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
                        rcvgap = peers_snd_nxt - l->rcv_nxt;
                if (rcvgap || (msg_probe(hdr)))
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
index 53e0fee800864e4b8c29ed793d60a5823d59c773..1eadc95e113294c159a6e5288db831e142be77dc 100644 (file)
@@ -1114,7 +1114,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
        }
 
        len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-       if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+       if (len && !TLV_OK(msg.req, len)) {
                msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
                err = -EOPNOTSUPP;
                goto send;
index 7c191641b44f64c080745df6615a8eccb237dd38..703875fd6cde204ddeaf630b9a6bd11daec6dbfa 100644 (file)
@@ -423,6 +423,8 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 
        /* There is still a working link => initiate failover */
        tnl = node_active_link(n, 0);
+       tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+       tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
        n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
        tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
        tipc_link_reset(l);
@@ -565,6 +567,8 @@ void tipc_node_check_dest(struct net *net, u32 onode,
                        goto exit;
                }
                tipc_link_reset(l);
+               if (n->state == NODE_FAILINGOVER)
+                       tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
                le->link = l;
                n->link_cnt++;
                tipc_node_calculate_timer(n, l);
@@ -1075,7 +1079,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        u16 exp_pkts = msg_msgcnt(hdr);
        u16 rcv_nxt, syncpt, dlv_nxt;
        int state = n->state;
-       struct tipc_link *l, *pl = NULL;
+       struct tipc_link *l, *tnl, *pl = NULL;
        struct tipc_media_addr *maddr;
        int i, pb_id;
 
@@ -1129,7 +1133,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
        }
 
        /* Open parallel link when tunnel link reaches synch point */
-       if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) {
+       if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
                if (!more(rcv_nxt, n->sync_point))
                        return true;
                tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
@@ -1138,6 +1142,10 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                return true;
        }
 
+       /* No synching needed if only one link */
+       if (!pl || !tipc_link_is_up(pl))
+               return true;
+
        /* Initiate or update synch mode if applicable */
        if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
                syncpt = iseqno + exp_pkts - 1;
@@ -1156,13 +1164,20 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 
        /* Open tunnel link when parallel link reaches synch point */
        if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
-               if (pl)
-                       dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
-               if (!pl || more(dlv_nxt, n->sync_point)) {
-                       tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT);
+               if (tipc_link_is_synching(l)) {
+                       tnl = l;
+               } else {
+                       tnl = pl;
+                       pl = l;
+               }
+               dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+               if (more(dlv_nxt, n->sync_point)) {
+                       tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
                        tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
                        return true;
                }
+               if (l == pl)
+                       return true;
                if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
                        return true;
                if (usr == LINK_PROTOCOL)
index 7d730543f24304dd64b7c9d1d855a6469c8727d8..477364ad750e5441949131cb918fcb2b350bcbbd 100644 (file)
@@ -135,8 +135,7 @@ EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
  * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
  *     %WIMAX_RF_OFF radio off.
  *
- * Reports changes in the software RF switch state to the the WiMAX
- * stack.
+ * Reports changes in the software RF switch state to the WiMAX stack.
  *
  * The main use is during initialization, so the driver can query the
  * device for its current software radio kill switch state and feed it
index 2a0bbd22854bd97b377139200f9e6b5e0ec2662f..3893409dee95b3ba23c39b369a57c48c6e838d9b 100644 (file)
@@ -407,6 +407,9 @@ use_default_name:
        INIT_LIST_HEAD(&rdev->bss_list);
        INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
        INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+       INIT_LIST_HEAD(&rdev->mlme_unreg);
+       spin_lock_init(&rdev->mlme_unreg_lock);
+       INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk);
        INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
                          cfg80211_dfs_channels_update_work);
 #ifdef CONFIG_CFG80211_WEXT
@@ -802,6 +805,7 @@ void wiphy_unregister(struct wiphy *wiphy)
        cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
        flush_work(&rdev->destroy_work);
        flush_work(&rdev->sched_scan_stop_wk);
+       flush_work(&rdev->mlme_unreg_wk);
 
 #ifdef CONFIG_PM
        if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -855,6 +859,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_P2P_DEVICE:
+               cfg80211_mlme_purge_registrations(wdev);
                cfg80211_stop_p2p_device(rdev, wdev);
                break;
        default:
index 311eef26bf88b9a0e8125678498583fa19edf688..b9d5bc8c148d32ecb8156d19c2652c49824f41cd 100644 (file)
@@ -59,6 +59,10 @@ struct cfg80211_registered_device {
        struct list_head beacon_registrations;
        spinlock_t beacon_registrations_lock;
 
+       struct list_head mlme_unreg;
+       spinlock_t mlme_unreg_lock;
+       struct work_struct mlme_unreg_wk;
+
        /* protected by RTNL only */
        int num_running_ifaces;
        int num_running_monitor_ifaces;
@@ -348,6 +352,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
                                u16 frame_type, const u8 *match_data,
                                int match_len);
+void cfg80211_mlme_unreg_wk(struct work_struct *wk);
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
index 7aae329e2b4e4a8e3afa943f670852a6e26e89dd..fb44fa3bf4efa750298163a15534572c43229b2e 100644 (file)
@@ -2,6 +2,7 @@
  * cfg80211 MLME SAP interface
  *
  * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2015          Intel Deutschland GmbH
  */
 
 #include <linux/kernel.h>
@@ -389,6 +390,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
 
 struct cfg80211_mgmt_registration {
        struct list_head list;
+       struct wireless_dev *wdev;
 
        u32 nlportid;
 
@@ -399,6 +401,46 @@ struct cfg80211_mgmt_registration {
        u8 match[];
 };
 
+static void
+cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev)
+{
+       struct cfg80211_mgmt_registration *reg;
+
+       ASSERT_RTNL();
+
+       spin_lock_bh(&rdev->mlme_unreg_lock);
+       while ((reg = list_first_entry_or_null(&rdev->mlme_unreg,
+                                              struct cfg80211_mgmt_registration,
+                                              list))) {
+               list_del(&reg->list);
+               spin_unlock_bh(&rdev->mlme_unreg_lock);
+
+               if (rdev->ops->mgmt_frame_register) {
+                       u16 frame_type = le16_to_cpu(reg->frame_type);
+
+                       rdev_mgmt_frame_register(rdev, reg->wdev,
+                                                frame_type, false);
+               }
+
+               kfree(reg);
+
+               spin_lock_bh(&rdev->mlme_unreg_lock);
+       }
+       spin_unlock_bh(&rdev->mlme_unreg_lock);
+}
+
+void cfg80211_mlme_unreg_wk(struct work_struct *wk)
+{
+       struct cfg80211_registered_device *rdev;
+
+       rdev = container_of(wk, struct cfg80211_registered_device,
+                           mlme_unreg_wk);
+
+       rtnl_lock();
+       cfg80211_process_mlme_unregistrations(rdev);
+       rtnl_unlock();
+}
+
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
                                u16 frame_type, const u8 *match_data,
                                int match_len)
@@ -449,11 +491,18 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
        nreg->match_len = match_len;
        nreg->nlportid = snd_portid;
        nreg->frame_type = cpu_to_le16(frame_type);
+       nreg->wdev = wdev;
        list_add(&nreg->list, &wdev->mgmt_registrations);
+       spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+       /* process all unregistrations to avoid driver confusion */
+       cfg80211_process_mlme_unregistrations(rdev);
 
        if (rdev->ops->mgmt_frame_register)
                rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
 
+       return 0;
+
  out:
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
@@ -472,15 +521,12 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
                if (reg->nlportid != nlportid)
                        continue;
 
-               if (rdev->ops->mgmt_frame_register) {
-                       u16 frame_type = le16_to_cpu(reg->frame_type);
-
-                       rdev_mgmt_frame_register(rdev, wdev,
-                                                frame_type, false);
-               }
-
                list_del(&reg->list);
-               kfree(reg);
+               spin_lock(&rdev->mlme_unreg_lock);
+               list_add_tail(&reg->list, &rdev->mlme_unreg);
+               spin_unlock(&rdev->mlme_unreg_lock);
+
+               schedule_work(&rdev->mlme_unreg_wk);
        }
 
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -496,16 +542,15 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 {
-       struct cfg80211_mgmt_registration *reg, *tmp;
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        spin_lock_bh(&wdev->mgmt_registrations_lock);
-
-       list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
-               list_del(&reg->list);
-               kfree(reg);
-       }
-
+       spin_lock(&rdev->mlme_unreg_lock);
+       list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg);
+       spin_unlock(&rdev->mlme_unreg_lock);
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+       cfg80211_process_mlme_unregistrations(rdev);
 }
 
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
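
The mlme.c rework above is about calling context: a driver's ->mgmt_frame_register() callback may sleep (note the might_sleep() added to rdev_mgmt_frame_register() a few hunks below), but cfg80211_mlme_unregister_socket() runs under a BH-disabled spinlock. So dead registrations are parked on rdev->mlme_unreg and replayed from a work item under the RTNL. Reduced to a hypothetical self-contained form, the idiom is (names illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct deferred_item {
    struct list_head list;
    /* producers kmalloc() these and fill in the real payload */
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

static void drain_fn(struct work_struct *wk);
static DECLARE_WORK(drain_work, drain_fn);

static void drain_fn(struct work_struct *wk)
{
    struct deferred_item *item;

    spin_lock_bh(&pending_lock);
    while ((item = list_first_entry_or_null(&pending,
                                            struct deferred_item, list))) {
        list_del(&item->list);
        /* drop the lock around anything that may sleep */
        spin_unlock_bh(&pending_lock);

        /* sleeping work (e.g. a driver callback) goes here */
        kfree(item);

        spin_lock_bh(&pending_lock);
    }
    spin_unlock_bh(&pending_lock);
}

/* callable from process or softirq context */
static void defer_item(struct deferred_item *item)
{
    spin_lock_bh(&pending_lock);
    list_add_tail(&item->list, &pending);
    spin_unlock_bh(&pending_lock);

    schedule_work(&drain_work);
}

Draining with the lock dropped around each potentially sleeping call, exactly as cfg80211_process_mlme_unregistrations() does, is what makes the handoff safe.
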
index 76b41578a838e3bed8596c5950ffbd6881d54a64..5d8748b4c8a2d20f76c38a8394888c7fe56924bf 100644 (file)
@@ -2321,6 +2321,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
                        rdev->wiphy.frag_threshold = old_frag_threshold;
                        rdev->wiphy.rts_threshold = old_rts_threshold;
                        rdev->wiphy.coverage_class = old_coverage_class;
+                       return result;
                }
        }
        return 0;
@@ -7390,7 +7391,8 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
                return -EOPNOTSUPP;
 
        if (!rdev->ops->set_mcast_rate)
index c6e83a7468c0c43baea32bbb0291c312017226f5..c23516d0f80794c7277812dd25dac250da4e5f0c 100644 (file)
@@ -733,6 +733,8 @@ static inline void
 rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
                         struct wireless_dev *wdev, u16 frame_type, bool reg)
 {
+       might_sleep();
+
        trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
        rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
        trace_rdev_return_void(&rdev->wiphy);
index aa2d75482017e1a258eb0cdb7271b2d615d8782e..b144485946f2e5ce2ec6411cd52462fa278b185e 100644 (file)
@@ -1004,7 +1004,7 @@ static u32 map_regdom_flags(u32 rd_flags)
 
 static const struct ieee80211_reg_rule *
 freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
-                  const struct ieee80211_regdomain *regd)
+                  const struct ieee80211_regdomain *regd, u32 bw)
 {
        int i;
        bool band_rule_found = false;
@@ -1028,7 +1028,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+               bw_fits = reg_does_bw_fit(fr, center_freq, bw);
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -1040,14 +1040,26 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
        return ERR_PTR(-EINVAL);
 }
 
-const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
-                                              u32 center_freq)
+const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
+                                                u32 center_freq, u32 min_bw)
 {
-       const struct ieee80211_regdomain *regd;
+       const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
+       const struct ieee80211_reg_rule *reg_rule = NULL;
+       u32 bw;
 
-       regd = reg_get_regdomain(wiphy);
+       for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+               reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+               if (!IS_ERR(reg_rule))
+                       return reg_rule;
+       }
 
-       return freq_reg_info_regd(wiphy, center_freq, regd);
+       return reg_rule;
+}
+
+const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
+                                              u32 center_freq)
+{
+       return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
 }
 EXPORT_SYMBOL(freq_reg_info);
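__freq_reg_info() now degrades gracefully: it first looks for a rule that can fit a 20 MHz channel, then retries at 10 and 5 MHz before giving up, so regulatory rules narrower than 20 MHz are no longer invisible. A hedged usage sketch (the 5815 MHz frequency and the narrow rule are hypothetical):

	/* A domain whose only matching rule is 5 MHz wide: the first two
	 * iterations (bw = 20000 and 10000 kHz) get an error pointer back
	 * from freq_reg_info_regd(), the third (5000 kHz) matches.
	 */
	const struct ieee80211_reg_rule *rule =
		__freq_reg_info(wiphy, MHZ_TO_KHZ(5815), MHZ_TO_KHZ(5));
	if (IS_ERR(rule))
		/* nothing fits even at 5 MHz: disable the channel */;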
 
@@ -1176,8 +1188,20 @@ static void handle_channel(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       /* If we get a reg_rule we can assume that at least 5 MHz fits */
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(10)))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(20)))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
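Two related fixes in this hunk: the new IEEE80211_CHAN_NO_10MHZ/NO_20MHZ restrictions are derived both from whether the rule's frequency range can actually fit 10/20 MHz and from max_bandwidth_khz, and the HT40 check switches from '=' to '|='. With the plain assignment, the narrower-bandwidth flags computed just above would have been silently discarded:

	bw_flags |= IEEE80211_CHAN_NO_20MHZ;  /* rule only fits 10 MHz */
	bw_flags = IEEE80211_CHAN_NO_HT40;    /* old code: NO_20MHZ lost */
	bw_flags |= IEEE80211_CHAN_NO_HT40;   /* fixed: both restrictions kept */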
@@ -1695,9 +1719,15 @@ static void handle_channel_custom(struct wiphy *wiphy,
        const struct ieee80211_power_rule *power_rule = NULL;
        const struct ieee80211_freq_range *freq_range = NULL;
        u32 max_bandwidth_khz;
+       u32 bw;
 
-       reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
-                                     regd);
+       for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+               reg_rule = freq_reg_info_regd(wiphy,
+                                             MHZ_TO_KHZ(chan->center_freq),
+                                             regd, bw);
+               if (!IS_ERR(reg_rule))
+                       break;
+       }
 
        if (IS_ERR(reg_rule)) {
                REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
@@ -1721,8 +1751,20 @@ static void handle_channel_custom(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       /* If we get a reg_rule we can assume that at least 5 MHz fits */
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(10)))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+                            MHZ_TO_KHZ(20)))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -2079,10 +2121,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                reg_process_hint_core(reg_request);
                return;
        case NL80211_REGDOM_SET_BY_USER:
-               treatment = reg_process_hint_user(reg_request);
-               if (treatment == REG_REQ_IGNORE ||
-                   treatment == REG_REQ_ALREADY_SET)
-                       return;
+               reg_process_hint_user(reg_request);
                return;
        case NL80211_REGDOM_SET_BY_DRIVER:
                if (!wiphy)
@@ -2099,7 +2138,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                goto out_free;
        }
 
-       /* This is required so that the orig_* parameters are saved */
+       /* This is required so that the orig_* parameters are saved.
+        * NOTE: treatment must be set for any case that reaches here!
+        */
        if (treatment == REG_REQ_ALREADY_SET && wiphy &&
            wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
                wiphy_update_regulatory(wiphy, reg_request->initiator);
index 18cead7645be0e75621e1b08a9e02621e75b7a4c..94af3d0657859e98c61d3f9b2babebcab722aec8 100644 (file)
@@ -115,7 +115,8 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
        rcu_read_unlock();
 }
 
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+                                                 int tos, int oif,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
                                                  int family)
@@ -127,14 +128,15 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EAFNOSUPPORT);
 
-       dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+       dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 
        xfrm_policy_put_afinfo(afinfo);
 
        return dst;
 }
 
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+                                               int tos, int oif,
                                                xfrm_address_t *prev_saddr,
                                                xfrm_address_t *prev_daddr,
                                                int family)
@@ -153,7 +155,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
                daddr = x->coaddr;
        }
 
-       dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+       dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
 
        if (!IS_ERR(dst)) {
                if (prev_saddr != saddr)
@@ -1373,15 +1375,15 @@ int __xfrm_sk_clone_policy(struct sock *sk)
 }
 
 static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
-              unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+              xfrm_address_t *remote, unsigned short family)
 {
        int err;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
        if (unlikely(afinfo == NULL))
                return -EINVAL;
-       err = afinfo->get_saddr(net, local, remote);
+       err = afinfo->get_saddr(net, oif, local, remote);
        xfrm_policy_put_afinfo(afinfo);
        return err;
 }
@@ -1410,7 +1412,9 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
                        if (xfrm_addr_any(local, tmpl->encap_family)) {
-                               error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
+                               error = xfrm_get_saddr(net, fl->flowi_oif,
+                                                      &tmp, remote,
+                                                      tmpl->encap_family);
                                if (error)
                                        goto fail;
                                local = &tmp;
@@ -1690,8 +1694,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 
                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        family = xfrm[i]->props.family;
-                       dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
-                                             family);
+                       dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+                                             &saddr, &daddr, family);
                        err = PTR_ERR(dst);
                        if (IS_ERR(dst))
                                goto put_states;
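These hunks thread the flow's output interface (fl->flowi_oif) from bundle creation down to the per-family dst_lookup and get_saddr resolvers, so route and source-address lookups for IPsec can be bound to a specific device. A simplified sketch of how an IPv4 resolver might consume the new argument (the function name here is hypothetical; real implementations live in the per-family xfrm policy code):

	static struct dst_entry *sketch_xfrm4_dst_lookup(struct net *net, int tos,
							 int oif,
							 const xfrm_address_t *saddr,
							 const xfrm_address_t *daddr)
	{
		struct flowi4 fl4 = {
			.flowi4_oif = oif,	/* new: honor the caller's device */
			.flowi4_tos = tos,
			.daddr = daddr->a4,
		};
		struct rtable *rt;

		if (saddr)
			fl4.saddr = saddr->a4;

		rt = __ip_route_output_key(net, &fl4);
		if (IS_ERR(rt))
			return ERR_CAST(rt);
		return &rt->dst;
	}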
index 0cebf1fc37a2743ba096747056fab6c927922b23..a8de9e3002000d7eaa76f6764797e5b231d187ff 100644 (file)
@@ -925,12 +925,10 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                        return err;
 
                if (attrs[XFRMA_ADDRESS_FILTER]) {
-                       filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+                       filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+                                        sizeof(*filter), GFP_KERNEL);
                        if (filter == NULL)
                                return -ENOMEM;
-
-                       memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
-                              sizeof(*filter));
                }
 
                if (attrs[XFRMA_PROTO])
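kmemdup() is the idiomatic replacement here; it does essentially what the open-coded pair it replaces did (see mm/util.c):

	/* roughly what kmemdup(src, len, gfp) does: */
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;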
index 4450fed91ab40f61306ab1af8bf38d470b1f4114..63e7d50e6a4fe3ca46cbd3b7ae064600bf6696ff 100644 (file)
@@ -12,6 +12,7 @@ hostprogs-y += tracex2
 hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
+hostprogs-y += tracex6
 hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
@@ -25,6 +26,7 @@ tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
 tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
+tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
@@ -37,6 +39,7 @@ always += tracex2_kern.o
 always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
+always += tracex6_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 
@@ -51,6 +54,7 @@ HOSTLOADLIBES_tracex2 += -lelf
 HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
+HOSTLOADLIBES_tracex6 += -lelf
 HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
index c77c872fe8ee477c7cfabf5fd539824fc676b173..3a44d3a272af40119b379fe1930244909499fd8d 100644 (file)
@@ -31,6 +31,8 @@ static unsigned long long (*bpf_get_current_uid_gid)(void) =
        (void *) BPF_FUNC_get_current_uid_gid;
 static int (*bpf_get_current_comm)(void *buf, int buf_size) =
        (void *) BPF_FUNC_get_current_comm;
+static int (*bpf_perf_event_read)(void *map, int index) =
+       (void *) BPF_FUNC_perf_event_read;
 
 /* llvm builtin functions that an eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
new file mode 100644 (file)
index 0000000..be479c4
--- /dev/null
@@ -0,0 +1,27 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = 32,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+       u64 count;
+       u32 key = bpf_get_smp_processor_id();
+       char fmt[] = "CPU-%d   %llu\n";
+
+       count = bpf_perf_event_read(&my_map, key);
+       bpf_trace_printk(fmt, sizeof(fmt), key, count);
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
new file mode 100644 (file)
index 0000000..8ea4976
--- /dev/null
@@ -0,0 +1,72 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_PERIOD  0x7fffffffffffffffULL
+
+static void test_bpf_perf_event(void)
+{
+       int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       int *pmu_fd = malloc(nr_cpus * sizeof(int));
+       int status, i;
+
+       struct perf_event_attr attr_insn_pmu = {
+               .freq = 0,
+               .sample_period = SAMPLE_PERIOD,
+               .inherit = 0,
+               .type = PERF_TYPE_HARDWARE,
+               .read_format = 0,
+               .sample_type = 0,
+               .config = 0, /* PMU: cycles */
+       };
+
+       for (i = 0; i < nr_cpus; i++) {
+               pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+               if (pmu_fd[i] < 0) {
+                       printf("event syscall failed\n");
+                       goto exit;
+               }
+
+               bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
+               ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+       }
+
+       status = system("ls > /dev/null");
+       if (status)
+               goto exit;
+       status = system("sleep 2");
+       if (status)
+               goto exit;
+
+exit:
+       for (i = 0; i < nr_cpus; i++)
+               close(pmu_fd[i]);
+       close(map_fd[0]);
+       free(pmu_fd);
+}
+
+int main(int argc, char **argv)
+{
+       char filename[256];
+
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+       if (load_bpf_file(filename)) {
+               printf("%s", bpf_log_buf);
+               return 1;
+       }
+
+       test_bpf_perf_event();
+       read_trace_pipe();
+
+       return 0;
+}
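How the new sample fits together: the user-space half opens one hardware cycle counter per CPU via perf_event_open() and stores each fd in that CPU's slot of the BPF_MAP_TYPE_PERF_EVENT_ARRAY (value_size is sizeof(u32), i.e. the fd); the kprobe half then calls bpf_perf_event_read() with the current CPU as the key on every sys_write and prints the raw counter. With the format string above, the trace_pipe output consumed by read_trace_pipe() looks roughly like this (field layout follows trace_pipe conventions; the values are illustrative):

	sleep-1234  [003] d...  123.456789: : CPU-3   48512309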
index 9cb8522d8d22a826caaec968e82c77516339ca01..f3d3fb42b8735e76aa54bd4c433574f7afc43b0e 100755 (executable)
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
index 9ed32502470e9bc1d776d5d925ee48be587baa17..5ebb8968793670d6a23ee6e6d2b30df6243a63ca 100644 (file)
@@ -406,6 +406,7 @@ static __init int yama_init(void)
         */
        if (!security_module_enable("yama"))
                return 0;
+       yama_add_hooks();
 #endif
        pr_info("Yama: becoming mindful.\n");
 
index 7bb988fa6b6d17764e08f39ecbf45f31ee2ff2ee..2a153d260836704011b6eadddfbc9b463e1b37b3 100644 (file)
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
            s->data_block_counter != UINT_MAX)
                data_block_counter = s->data_block_counter;
 
-       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
-           (s->data_block_counter == UINT_MAX)) {
+       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+            data_block_counter == s->tx_first_dbc) ||
+           s->data_block_counter == UINT_MAX) {
                lost = false;
        } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
                lost = data_block_counter != s->data_block_counter;
index 26b909329e54d96d7fcb075f9538599aaf30cb09..b2cf9e75693b643ce0ac81ebdef15458fd2d4018 100644 (file)
@@ -157,6 +157,8 @@ struct amdtp_stream {
 
        /* quirk: fixed interval of dbc between previous/current packets. */
        unsigned int tx_dbc_interval;
+       /* quirk: the expected value of the dbc field in the first packet. */
+       unsigned int tx_first_dbc;
 
        bool callbacked;
        wait_queue_head_t callback_wait;
index c670db4eee70d42c91db2904354bf391267c22ef..c94a432f7cc653dca45316ea67bdc481b9e8e887 100644 (file)
@@ -248,10 +248,16 @@ efw_probe(struct fw_unit *unit,
        err = get_hardware_info(efw);
        if (err < 0)
                goto error;
-       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
-               efw->is_af2 = true;
+       /* AudioFire8 (since 2009) and AudioFirePre8 */
        if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
                efw->is_af9 = true;
+       /* These models use the same firmware. */
+       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+           entry->model_id == MODEL_GIBSON_RIP ||
+           entry->model_id == MODEL_GIBSON_GOLDTOP)
+               efw->is_fireworks3 = true;
 
        snd_efw_proc_init(efw);
 
index c33252b7bc847501c4fc2c7b2c18d07a9ec18898..084d414b228cf425dc99440cc081d65459bbe175 100644 (file)
@@ -70,8 +70,8 @@ struct snd_efw {
        bool resp_addr_changable;
 
        /* for quirks */
-       bool is_af2;
        bool is_af9;
+       bool is_fireworks3;
        u32 firmware_version;
 
        unsigned int midi_in_ports;
index a0762dd6231e6080eb9ab86343c413db0bd64492..7e353f1f7bff359ea8845630aeff2521759e508f 100644 (file)
@@ -172,9 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
        efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
        /* Fireworks resets dbc at bus reset. */
        efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
-       /* AudioFire2 starts packets with non-zero dbc. */
-       if (efw->is_af2)
-               efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
+       /*
+        * But recent firmware starts packets with a non-zero dbc.
+        * Driver version 5.7.6 installs firmware version 5.7.3.
+        */
+       if (efw->is_fireworks3 &&
+           (efw->firmware_version == 0x5070000 ||
+            efw->firmware_version == 0x5070300 ||
+            efw->firmware_version == 0x5080000))
+               efw->tx_stream.tx_first_dbc = 0x02;
        /* AudioFire9 always reports wrong dbs. */
        if (efw->is_af9)
                efw->tx_stream.flags |= CIP_WRONG_DBS;
index b2da19b60f4e25cf6ec858832d6f33349313ca66..358f16195483f6b51c3a84a3cff672d8696951a6 100644 (file)
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
 
        offset = snd_hdac_chip_readl(bus, LLCH);
 
-       if (offset < 0)
-               return -EIO;
-
        /* Lets walk the linked capabilities list */
        do {
                cur_cap = _snd_hdac_chip_read(l, bus, offset);
 
-               if (cur_cap < 0)
-                       return -EIO;
-
                dev_dbg(bus->dev, "Capability version: 0x%x\n",
                                ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
 
index f8ffbdbb450d785e281bd7a0aae3c6d8a8c2a2ab..3de47dd1a76d856f95c2051b1bde6e55522f2b55 100644 (file)
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
                if (stream->direction != substream->stream)
                        continue;
 
-               if (stream->opened) {
+               if (!stream->opened) {
                        if (!hstream->decoupled)
                                snd_hdac_ext_stream_decouple(ebus, hstream, true);
                        res = hstream;
index c456c04e0928d2c4ef9e65d114bdec3bf6b9332f..374ea53288ca25ff6093467012afd2607192f505 100644 (file)
@@ -5189,6 +5189,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+       SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5290,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 6492bca8c70f8bbfdf31e2fb13321904f5a5b00a..4ca12665ff730c6980e2a4366dd195e47f94aecd 100644 (file)
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
        int changed;
 
        mutex_lock(&chip->mutex);
-       changed = !value->value.integer.value[0] != chip->dac_mute;
+       changed = (!value->value.integer.value[0]) != chip->dac_mute;
        if (changed) {
                chip->dac_mute = !value->value.integer.value[0];
                chip->model.update_dac_mute(chip);
index 2ae9619443d15dbeaf128cab385347db6bf26369..1d651b8a89570404cd306f239b17e939b1d5fa81 100644 (file)
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
        bool
        select SND_DMAENGINE_PCM
 
+config SND_SOC_TOPOLOGY
+       bool
+
 # All the supported SoCs
 source "sound/soc/adi/Kconfig"
 source "sound/soc/atmel/Kconfig"
index e189903fabf42958eff143e487999f0f013b85d7..669648b41d3027adf29ead27eda5f72a6ed0aaf1 100644 (file)
@@ -1,6 +1,9 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
 snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
 snd-soc-core-objs += soc-topology.o
+endif
 
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
index d7ec4756e45bf9bab0c08059ee74c39b4ea3c812..8e36198474d94d59cd8b7bda10e6be434103fa55 100644 (file)
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
        case SND_SOC_DAIFMT_RIGHT_J:
                if (params_width(params) == 16) {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
-                               CS4265_DAC_CTL_DIF, (1 << 5));
+                               CS4265_DAC_CTL_DIF, (2 << 4));
                        snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-                               CS4265_SPDIF_CTL2_DIF, (1 << 7));
+                               CS4265_SPDIF_CTL2_DIF, (2 << 6));
                } else {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
-                               CS4265_DAC_CTL_DIF, (3 << 5));
+                               CS4265_DAC_CTL_DIF, (3 << 4));
                        snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-                               CS4265_SPDIF_CTL2_DIF, (1 << 7));
+                               CS4265_SPDIF_CTL2_DIF, (3 << 6));
                }
                break;
        case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
                snd_soc_update_bits(codec, CS4265_ADC_CTL,
                        CS4265_ADC_DIF, 0);
                snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-                       CS4265_SPDIF_CTL2_DIF, (1 << 6));
+                       CS4265_SPDIF_CTL2_DIF, 0);
 
                break;
        default:
index e9cc3aae5366d30d0522b105ed11fdb88943948f..961bd7e5877ee42c3e50f7a3e2e826a1ffab9cc8 100644 (file)
@@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                break;
 
        case RT5645_DMIC_DATA_GPIO5:
+               regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+                       RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
                regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
                        RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
                regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
index 0353a6a273ab4ca2bd5136c28aaedee14c6e54b2..278bb9f464c4120b681c7ad5c6086df89d6de3ab 100644 (file)
 #define RT5645_GP6_PIN_SFT                     6
 #define RT5645_GP6_PIN_GPIO6                   (0x0 << 6)
 #define RT5645_GP6_PIN_DMIC2_SDA               (0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK               (0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT                        4
+#define RT5645_I2S2_DAC_PIN_I2S                        (0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO               (0x1 << 4)
 #define RT5645_GP8_PIN_MASK                    (0x1 << 3)
 #define RT5645_GP8_PIN_SFT                     3
 #define RT5645_GP8_PIN_GPIO8                   (0x0 << 3)
index 4c01bb43928d136c93963730f1149f9d9bf4a66b..5bbaa667bec1c608c35968c983eaeece020ad8db 100644 (file)
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
        if (byt == NULL)
                return -ENOMEM;
 
+       byt->dev = dev;
+
        ipc = &byt->ipc;
        ipc->dev = dev;
        ipc->ops.tx_msg = byt_tx_msg;
index f95f271aab0ce30412954428b426af2461438dea..f6efa9d4acadd5e056ea0a0494ed51da859fbc3a 100644 (file)
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
        if (hsw == NULL)
                return -ENOMEM;
 
+       hsw->dev = dev;
+
        ipc = &hsw->ipc;
        ipc->dev = dev;
        ipc->ops.tx_msg = hsw_tx_msg;
index 59ac211f8fe7c273c84dcd67ac94bb27cb760564..31068b8f3db0dd965cc2bdc6742684dc2cb8d8ea 100644 (file)
@@ -33,6 +33,7 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/soc-topology.h>
+#include <sound/tlv.h>
 
 /*
  * We make several passes over the data (since it won't necessarily be ordered)
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
                        k->put = bops[i].put;
                if (k->get == NULL && bops[i].id == hdr->ops.get)
                        k->get = bops[i].get;
-               if (k->info == NULL && ops[i].id == hdr->ops.info)
+               if (k->info == NULL && bops[i].id == hdr->ops.info)
                        k->info = bops[i].info;
        }
 
@@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
        return 0;
 }
 
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+       struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
+{
+       unsigned int item_len = 2 * sizeof(unsigned int);
+       unsigned int *p;
+
+       p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+       p[1] = item_len;
+       p[2] = scale->min;
+       p[3] = (scale->step & TLV_DB_SCALE_MASK)
+                       | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
+
+       kc->tlv.p = (void *)p;
+       return 0;
+}
+
 static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-       struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
+       struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
 {
-       struct snd_ctl_tlv *tlv;
-       int size;
+       struct snd_soc_tplg_ctl_tlv *tplg_tlv;
 
-       if (tplg_tlv->count == 0)
+       if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
                return 0;
 
-       size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
-               ~(sizeof(unsigned int) - 1));
-       tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
-       if (tlv == NULL)
-               return -ENOMEM;
-
-       dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-               tplg_tlv->numid, size);
+       if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+               kc->tlv.c = snd_soc_bytes_tlv_callback;
+       } else {
+               tplg_tlv = &tc->tlv;
+               switch (tplg_tlv->type) {
+               case SNDRV_CTL_TLVT_DB_SCALE:
+                       return soc_tplg_create_tlv_db_scale(tplg, kc,
+                                       &tplg_tlv->scale);
 
-       tlv->numid = tplg_tlv->numid;
-       tlv->length = size;
-       memcpy(&tlv->tlv[0], tplg_tlv->data, size);
-       kc->tlv.p = (void *)tlv;
+               /* TODO: add support for other TLV types */
+               default:
+                       dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+                                       tplg_tlv->type);
+                       return -EINVAL;
+               }
+       }
 
        return 0;
 }
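The four words built by soc_tplg_create_tlv_db_scale() are the standard ALSA dB-scale TLV layout: {SNDRV_CTL_TLVT_DB_SCALE, payload length in bytes, minimum in 0.01 dB units, step with optional mute bit}. It is the dynamic counterpart of the static macro form; for a hypothetical control:

	/* -50.00 dB minimum, 1.00 dB steps, lowest value mutes: */
	static const DECLARE_TLV_DB_SCALE(example_tlv, -5000, 100, 1);
	/* expands to { SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int),
	 *              -5000, 100 | TLV_DB_SCALE_MUTE } */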
@@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
                }
 
                /* create any TLV data */
-               soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
+               soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
 
                /* register control here */
                err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1350,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
        template.reg = w->reg;
        template.shift = w->shift;
        template.mask = w->mask;
+       template.subseq = w->subseq;
        template.on_val = w->invert ? 0 : 1;
        template.off_val = w->invert ? 1 : 0;
        template.ignore_suspend = w->ignore_suspend;
index 1fab9778807a0015f2578f0504306ed852eb4932..0450593980fd3525a65feca17dccea0e9940506e 100644 (file)
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing && chip->in_pm)
+       if (chip->probing || chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
index 094ddaee104c73d7caae22d851d79629c4715cd3..d31fac19c30b2d298ab2cf3a710b9b27c5764144 100644 (file)
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
index 53e8bb7bc8521a09f1347d48de2a0dd1eeb5e0bc..2a5d8d7698aedb8c82bdf8488f1fb62ded5b438a 100644 (file)
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
        else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+               update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                " #   %5.2f%% aborted cycles         ",
                                100.0 * ((total2-avg) / total));
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / transaction   ", ratio);
        } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / elision       ", ratio);
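Two bugs are fixed above: CYCLES_IN_TX samples were being accumulated into runtime_transaction_stats (the TRANSACTION_START bucket) instead of runtime_cycles_in_tx_stats, and the printed ratio guarded the wrong operand. The computation divides total transactional cycles by the number of transactions, so it is the divisor, avg, that must be non-zero:

	total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); /* cycles in tx */
	if (avg)                     /* avg = number of TRANSACTION_START events */
		ratio = total / avg; /* cycles per transaction */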
index 08c2a36ef7a9b429169fa1bd2f19f9b5340348ad..4124593696862fcb370fee15bcf7e34e11cdf9e0 100644 (file)
@@ -19,6 +19,8 @@
  *   - PACKET_FANOUT_LB
  *   - PACKET_FANOUT_CPU
  *   - PACKET_FANOUT_ROLLOVER
+ *   - PACKET_FANOUT_CBPF
+ *   - PACKET_FANOUT_EBPF
  *
  * Todo:
  * - functionality: PACKET_FANOUT_FLAG_DEFRAG
@@ -44,7 +46,9 @@
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/unistd.h>      /* for __NR_bpf */
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/if_packet.h>
 #include <net/ethernet.h>
 #include <netinet/ip.h>
@@ -91,6 +95,51 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
        return fd;
 }
 
+static void sock_fanout_set_ebpf(int fd)
+{
+       const int len_off = __builtin_offsetof(struct __sk_buff, len);
+       struct bpf_insn prog[] = {
+               { BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
+               { BPF_LDX   | BPF_W   | BPF_MEM, 0, 6, len_off, 0 },
+               { BPF_JMP   | BPF_JGE | BPF_K,   0, 0, 1, DATA_LEN },
+               { BPF_JMP   | BPF_JA  | BPF_K,   0, 0, 4, 0 },
+               { BPF_LD    | BPF_B   | BPF_ABS, 0, 0, 0, 0x50 },
+               { BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 2, DATA_CHAR },
+               { BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 1, DATA_CHAR_1 },
+               { BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
+               { BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
+       };
+       char log_buf[512];
+       union bpf_attr attr;
+       int pfd;
+
+       memset(&attr, 0, sizeof(attr));
+       attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+       attr.insns = (unsigned long) prog;
+       attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+       attr.license = (unsigned long) "GPL";
+       attr.log_buf = (unsigned long) log_buf;
+       attr.log_size = sizeof(log_buf);
+       attr.log_level = 1;
+
+       pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+       if (pfd < 0) {
+               perror("bpf");
+               fprintf(stderr, "bpf verifier:\n%s\n", log_buf);
+               exit(1);
+       }
+
+       if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &pfd, sizeof(pfd))) {
+               perror("fanout data ebpf");
+               exit(1);
+       }
+
+       if (close(pfd)) {
+               perror("close ebpf");
+               exit(1);
+       }
+}
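For readability, the raw instruction array above decodes to roughly the following (struct bpf_insn initializers are {code, dst_reg, src_reg, off, imm}); with BPF-based fanout, the program's return value, modulo the group size, selects the receiving socket:

	/* r6 = r1                        save skb context for the legacy load
	 * r0 = *(u32 *)(r6 + len_off)   r0 = skb->len
	 * if (r0 >= DATA_LEN) goto load
	 * goto drop
	 * load: r0 = byte at offset 0x50  BPF_LD | BPF_ABS packet load
	 * if (r0 == DATA_CHAR)   goto exit
	 * if (r0 == DATA_CHAR_1) goto exit
	 * drop: r0 = 0
	 * exit: return r0                'a', 'b', or 0 -> r0 % 2 picks the socket
	 */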
+
 static char *sock_fanout_open_ring(int fd)
 {
        struct tpacket_req req = {
@@ -115,8 +164,8 @@ static char *sock_fanout_open_ring(int fd)
 
        ring = mmap(0, req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-       if (!ring) {
-               fprintf(stderr, "packetsock ring mmap\n");
+       if (ring == MAP_FAILED) {
+               perror("packetsock ring mmap");
                exit(1);
        }
 
@@ -209,6 +258,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
 {
        const int expect0[] = { 0, 0 };
        char *rings[2];
+       uint8_t type = typeflags & 0xFF;
        int fds[2], fds_udp[2][2], ret;
 
        fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
@@ -219,6 +269,11 @@ static int test_datapath(uint16_t typeflags, int port_off,
                fprintf(stderr, "ERROR: failed open\n");
                exit(1);
        }
+       if (type == PACKET_FANOUT_CBPF)
+               sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+       else if (type == PACKET_FANOUT_EBPF)
+               sock_fanout_set_ebpf(fds[0]);
+
        rings[0] = sock_fanout_open_ring(fds[0]);
        rings[1] = sock_fanout_open_ring(fds[1]);
        pair_udp_open(fds_udp[0], PORT_BASE);
@@ -227,11 +282,11 @@ static int test_datapath(uint16_t typeflags, int port_off,
 
        /* Send data, but not enough to overflow a queue */
        pair_udp_send(fds_udp[0], 15);
-       pair_udp_send(fds_udp[1], 5);
+       pair_udp_send_char(fds_udp[1], 5, DATA_CHAR_1);
        ret = sock_fanout_read(fds, rings, expect1);
 
        /* Send more data, overflow the queue */
-       pair_udp_send(fds_udp[0], 15);
+       pair_udp_send_char(fds_udp[0], 15, DATA_CHAR_1);
        /* TODO: ensure consistent order between expect1 and expect2 */
        ret |= sock_fanout_read(fds, rings, expect2);
 
@@ -275,6 +330,7 @@ int main(int argc, char **argv)
        const int expect_rb[2][2]       = { { 15, 5 },  { 20, 15 } };
        const int expect_cpu0[2][2]     = { { 20, 0 },  { 20, 0 } };
        const int expect_cpu1[2][2]     = { { 0, 20 },  { 0, 20 } };
+       const int expect_bpf[2][2]      = { { 15, 5 },  { 15, 20 } };
        int port_off = 2, tries = 5, ret;
 
        test_control_single();
@@ -296,6 +352,11 @@ int main(int argc, char **argv)
        ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
                             port_off, expect_rb[0], expect_rb[1]);
 
+       ret |= test_datapath(PACKET_FANOUT_CBPF,
+                            port_off, expect_bpf[0], expect_bpf[1]);
+       ret |= test_datapath(PACKET_FANOUT_EBPF,
+                            port_off, expect_bpf[0], expect_bpf[1]);
+
        set_cpuaffinity(0);
        ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
                             expect_cpu0[0], expect_cpu0[1]);
index 37da54ac85a9583f1d364b8c88ed3fcb7d9f6b84..24bc7ec1be7dab217689fbda39d9c503a1bc6bfc 100644 (file)
@@ -30,6 +30,7 @@
 
 #define DATA_LEN                       100
 #define DATA_CHAR                      'a'
+#define DATA_CHAR_1                    'b'
 
 #define PORT_BASE                      8000
 
 # define __maybe_unused                __attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void pair_udp_setfilter(int fd)
+static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 {
        struct sock_filter bpf_filter[] = {
                { 0x80, 0, 0, 0x00000000 },  /* LD  pktlen                    */
-               { 0x35, 0, 5, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
+               { 0x35, 0, 4, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
                { 0x30, 0, 0, 0x00000050 },  /* LD  ip[80]                    */
-               { 0x15, 0, 3, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
-               { 0x30, 0, 0, 0x00000051 },  /* LD  ip[81]                    */
-               { 0x15, 0, 1, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
+               { 0x15, 1, 0, DATA_CHAR  },  /* JEQ DATA_CHAR   [t goto match]*/
+               { 0x15, 0, 1, DATA_CHAR_1},  /* JEQ DATA_CHAR_1 [t goto match]*/
                { 0x06, 0, 0, 0x00000060 },  /* RET match                     */
                { 0x06, 0, 0, 0x00000000 },  /* RET no match                  */
        };
        struct sock_fprog bpf_prog;
 
+       if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
+               bpf_filter[5].code = 0x16;   /* RET A                         */
+
        bpf_prog.filter = bpf_filter;
        bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-       if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+       if (setsockopt(fd, lvl, optnum, &bpf_prog,
                       sizeof(bpf_prog))) {
                perror("setsockopt SO_ATTACH_FILTER");
                exit(1);
        }
 }
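The same helper now serves both uses: attached with SO_ATTACH_FILTER it returns a fixed accept length (0x60) on match, but for PACKET_FANOUT_DATA the match return at index 5 is patched from RET K to RET A (classic BPF opcode 0x16 = BPF_RET | BPF_A), so the accumulator, still holding the matched payload byte, selects the fanout member just like the eBPF version above:

	/* classic BPF return opcodes used here: */
	{ 0x06, 0, 0, 0x00000060 }   /* BPF_RET | BPF_K: return constant 0x60 */
	{ 0x16, 0, 0, 0x00000000 }   /* BPF_RET | BPF_A: return accumulator   */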
 
+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+       sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
+}
+
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
        struct sockaddr_in saddr, daddr;
@@ -96,11 +104,11 @@ static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
        }
 }
 
-static __maybe_unused void pair_udp_send(int fds[], int num)
+static __maybe_unused void pair_udp_send_char(int fds[], int num, char payload)
 {
        char buf[DATA_LEN], rbuf[DATA_LEN];
 
-       memset(buf, DATA_CHAR, sizeof(buf));
+       memset(buf, payload, sizeof(buf));
        while (num--) {
                /* Should really handle EINTR and EAGAIN */
                if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
@@ -118,6 +126,11 @@ static __maybe_unused void pair_udp_send(int fds[], int num)
        }
 }
 
+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+       return pair_udp_send_char(fds, num, DATA_CHAR);
+}
+
 static __maybe_unused void pair_udp_close(int fds[])
 {
        close(fds[0]);